blob: 9500ba689090483f7e338aed7681632692c1e996 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000014#include <armnnUtils/DataLayoutIndexed.hpp>
15#include <armnnUtils/Permute.hpp>
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
// Shared state threaded through all conversion steps while a HAL model is
// translated into an armnn::INetwork.
struct ConversionData
{
    // @param backends Backend ids (in preference order) queried for layer support.
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    {}

    // Backends the converted network may run on, in preference order.
    const std::vector<armnn::BackendId> m_Backends;
    // The network under construction; initialised empty (null pointer/deleter pair).
    armnn::INetworkPtr m_Network;
    // Output slots producing each operand's value; presumably indexed by operand
    // index — confirm against the callers that populate it.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operand data (see
    // GetOperandValueReadOnlyAddress); must outlive the conversion.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
// Lightweight handle pairing an armnn output slot with the tensor info of the
// value it produces. Represents an (already converted) input of an operation.
// Member function bodies are defined elsewhere.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when this handle wraps a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    // Tensor info describing the value carried on the wrapped slot.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
// Owns (or references) the data of a constant tensor extracted from the model,
// together with the armnn::ConstTensor describing it. Move-only.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation applied to the data (data is copied and swizzled when not identity).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    // Whether the pin holds a usable constant tensor.
    bool IsValid() const;
    // Whether an invalid pin represents an omitted optional value rather than an error.
    bool IsOptional() const;

    // Accessors for the wrapped tensor; only meaningful when IsValid().
    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
116// Convenience function to log the reason for failing to convert a model.
117// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
// Logs (at debug level) why a model or operation could not be converted.
// Always returns false so call sites can write: return Fail("...", ...);
template<class... Args>
static bool Fail(const char* formatStr, Args&&... logArgs)
{
    ALOGD(formatStr, std::forward<Args>(logArgs)...);
    return false;
}
124
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100125// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
126// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
// Queries each backend in 'backends' (in order) via its ILayerSupport object,
// stopping at the first that reports support. 'supported' must be a bool lvalue;
// it is set to the final result. Failures (and unregistered backends) are logged
// at debug level. An InvalidArgumentException from the support check is rethrown
// with added location context.
// NOTE: no comments inside the macro body — they would break the '\' continuations.
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100168
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000169template<typename HalOperand>
170armnn::TensorShape GetTensorShapeForOperand(const HalOperand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100171{
172 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
173}
174
Matthew Bentham912b3622019-05-03 15:49:14 +0100175inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100176{
Matthew Bentham912b3622019-05-03 15:49:14 +0100177 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
178 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
179 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100180}
181
#ifdef ARMNN_ANDROID_NN_V1_2

// Tensor (and BOOL) operand data types the 1.2 driver supports for tensors.
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
198
// HAL 1.0 has no BOOL operand type, so a 1.0 operand is never a bool.
inline bool IsBool(V1_0::Operand)
{
    return false;
}

// A V1_0::Operand is never a HAL 1.2 operand.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
208
#ifdef ARMNN_ANDROID_NN_V1_2

// HAL 1.2 introduced the BOOL operand type; detect it here.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if a operand is 1_2 Operand
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
223
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100224template<typename LayerHandleType>
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000225armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
226 LayerHandleType& inputLayer,
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100227 armnn::TensorInfo reshapeInfo)
228{
229 armnn::ReshapeDescriptor reshapeDescriptor;
230 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
231
232 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
233 BOOST_ASSERT(reshapeLayer != nullptr);
234
235 // Attach the input layer to the reshape layer
236 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
237 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
238
239 return *reshapeLayer;
240}
241
// Connects the two inputs to the binary 'startLayer'. When the inputs differ in rank,
// a reshape layer is inserted in front of the lower-rank one, prepending degenerate
// (size 1) dimensions so both ranks match.
// @return false if the required reshape is not supported by any of the backends in 'data'.
bool BroadcastTensor(LayerInputHandle& input0,
                     LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer,
                     ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Right-align the smaller shape inside a maxInputDimensions-sized shape padded with 1s.
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
333
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +0000334void CalcPadding(uint32_t input,
335 uint32_t kernel,
336 uint32_t stride,
337 uint32_t& outPadHead,
338 uint32_t& outPadTail,
arovir01b0717b52018-09-05 17:03:25 +0100339 android::nn::PaddingScheme scheme)
340{
341 int32_t padHead;
342 int32_t padTail;
343 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
344 outPadHead = boost::numeric_cast<uint32_t>(padHead);
345 outPadTail = boost::numeric_cast<uint32_t>(padTail);
346}
347
#ifdef ARMNN_ANDROID_NN_V1_2

// HAL 1.2 variant: also takes the kernel dilation into account.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

// Transpose convolution: padding is derived from the *output* size (signed results possible).
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, uint32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
367
Matthew Bentham912b3622019-05-03 15:49:14 +0100368Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100369{
370 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100371 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100372 shape.dimensions = operand.dimensions;
373 shape.scale = operand.scale;
374 shape.offset = operand.zeroPoint;
375 return shape;
376}
377
#ifdef ARMNN_ANDROID_NN_V1_2

// HAL 1.2 overload; mirrors the 1.0 version field-for-field.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
391
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Only overwrite the scale when it is within 1% of the expected value;
            // larger mismatches are left untouched so armnn can reject them.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
431
// 4D Tensor Permutations
// Used when moving tensors between the Android NN and armnn data layouts
// (see SwizzleIn / DeswizzleOut / SwizzleInputs below).
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors (used by the concat axis handling in
// CreateConcatPermutationParameters)
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
442
443template<typename OSlot>
444armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
445 const armnn::PermutationVector& mappings)
446{
447 // Add swizzle layer
448 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
449
450 BOOST_ASSERT(layer != nullptr);
451
452 // Connect input to swizzle layer
453 input.Connect(layer->GetInputSlot(0));
454
455 // Setup swizzled output
456 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
457 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
458
459 return *layer;
460}
461
// Inserts an NHWCToArmNN permute between 'input' and input slot 'index' of 'layer'.
void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}

// Appends an ArmNNToNHWC permute after output slot 'index' of 'layer'.
// @return the new permute (deswizzle) layer.
armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}
476
// Wraps a chain of layers with an input swizzle (before 'firstLayer') and an
// output deswizzle (after 'lastLayer'); returns the deswizzle layer.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}

// Single-layer convenience overload of the above.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
493
494bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
495 const armnn::TensorShape & outputShape,
496 uint32_t concatDim)
497{
498 // Validate the output shape is correct given the input shapes (which have just been validated)
499 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
500 if (outputShape.GetNumDimensions() != numDimensions)
501 {
502 return Fail("%s: Output shape has wrong number of dimensions", __func__);
503 }
504
505 unsigned int outputSizeAlongConcatenatedDimension = 0;
506 for (unsigned int i = 0; i < inputShapes.size(); i++)
507 {
508 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
509 }
510
511 for (unsigned int i = 0; i < numDimensions; ++i)
512 {
513 if (i == concatDim)
514 {
515 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
516 {
517 return Fail(
518 "%s: Invalid output shape for dimension %d (%d != %d)",
519 __func__,
520 i,
521 outputShape[i],
522 outputSizeAlongConcatenatedDimension);
523 }
524 }
525 else
526 {
527 if (outputShape[i] != inputShapes[0][i])
528 {
529 return Fail("%s: Invalid output shape", __func__);
530 }
531 }
532 }
533
534 return true;
535}
536
537bool RequiresReshape(armnn::TensorShape & inputShape)
538{
539 return inputShape.GetNumDimensions() < 3;
540}
541
// Applies 'mapping' to every input by inserting one permute layer per input
// (no-op when the mapping is the 4D identity). Both 'inputs' and 'inputShapes'
// are updated in place to refer to the swizzled outputs.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
562
narpra01f176d5a2018-11-18 20:17:48 +0000563bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
564 int32_t & concatDimension,
565 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100566{
narpra01f176d5a2018-11-18 20:17:48 +0000567 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100568 BOOST_ASSERT(numberOfDimensions >= 3);
569
570 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000571 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
572 // or along dimension 0 or 2 for a 3-D tensor.
573 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100574 {
narpra01f176d5a2018-11-18 20:17:48 +0000575 concatDimension = 1;
576 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
577 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100578 }
narpra01f176d5a2018-11-18 20:17:48 +0000579 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100580 {
narpra01f176d5a2018-11-18 20:17:48 +0000581 concatDimension = 0;
582 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
583 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100584 }
narpra01f176d5a2018-11-18 20:17:48 +0000585 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100586}
587
588} // anonymous namespace
589
590namespace armnn_driver
591{
592
593//// Creates an ArmNN activation layer and connects it to the given layer, if the
594//// passed in AndroidNN activation function requires so.
595//// @return The end layer of the sequence of layers built for the given AndroidNN
596//// activation function or nullptr if an error occurred (e.g. unsupported activation).
597//// Note that the end layer matches the input layer if no activation is required
598//// (the sequence of layers has length 1).
599armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
600 ActivationFn activation,
601 armnn::IConnectableLayer* prevLayer,
602 ConversionData& data);
603
604} // namespace armnn_driver
605
606///
607/// Utility templates
608///
609
610namespace armnn_driver
611{
612
613using namespace android::nn;
614
// Looks up the operand feeding input 'inputIndex' of 'operation'.
// @param failOnIndexOutOfBounds When true, an out-of-range index is logged as a failure.
// @return pointer into model.operands, or nullptr when the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // Cast the size: passing a size_t through varargs where an int-sized
            // specifier is expected is undefined behaviour on LP64; %u matches
            // the unsigned arguments.
            Fail("%s: invalid input index: %u out of %u",
                 __func__, inputIndex, static_cast<uint32_t>(operation.inputs.size()));
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}
636
// Looks up the operand produced as output 'outputIndex' of 'operation'.
// @return pointer into model.operands, or nullptr (with a logged failure) when
//         the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // Cast the size: passing a size_t through varargs where an int-sized
        // specifier is expected is undefined behaviour on LP64; %u matches
        // the unsigned arguments.
        Fail("%s: invalid output index: %u out of %u",
             __func__, outputIndex, static_cast<uint32_t>(operation.outputs.size()));
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
656
// Resolves the read-only address of a constant operand's data.
// @param optional When true, a NO_VALUE operand yields nullptr without logging a failure.
// @return pointer to the operand's data (inside model.operandValues or a memory
//         pool), or nullptr when unavailable.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A mandatory operand with no value is handled as an error below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
703
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100704template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100705 typename HalOperation = typename HalPolicy::Operation,
706 typename HalModel = typename HalPolicy::Model,
707 typename HalOperandType = typename HalPolicy::OperandType>
708bool GetOperandType(const HalOperation& operation,
709 uint32_t inputIndex,
710 const HalModel& model,
711 HalOperandType& type)
712{
713 using HalOperand = typename HalPolicy::Operand;
714
715 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
716 if (!operand)
717 {
718 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
719 }
720
721 type = operand->type;
722 return true;
723}
724
// Returns true when the operand's value is fixed at conversion time: embedded in
// the model (CONSTANT_COPY), held in a memory pool (CONSTANT_REFERENCE), or
// absent altogether (NO_VALUE — deliberately included so omitted optional inputs
// are treated as "constant" by callers such as ConvertOperandToConstTensorPin).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand>
bool IsOperandConstant(const HalOperand& operand)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    HalOperandLifeTime lifetime = operand.lifetime;

    return lifetime == HalOperandLifeTime::CONSTANT_COPY ||
           lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
           lifetime == HalOperandLifeTime::NO_VALUE;
}
737
// Builds a ConstTensorPin around a constant operand's data.
// @param dimensionMappings   Permutation applied to the tensor data (and, for
//                            per-axis quantization, to the quantization dimension).
// @param overrideTensorShape When non-null, replaces the operand's shape.
// @param optional            When true, a missing value yields an invalid-but-optional pin
//                            instead of an error.
// @return an invalid pin (IsValid() == false) on any failure.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional && !IsOperandConstant<HalPolicy>(operand))
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    // Android datalayout might be different than armnn datalayout, e.g. the kernel for the depthwise convolution.
    if (tensorInfo.HasPerAxisQuantization())
    {
        tensorInfo.SetQuantizationDim(dimensionMappings[tensorInfo.GetQuantizationDim().value()]);
    }

    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
786
787template<typename HalPolicy,
788 typename HalOperation = typename HalPolicy::Operation,
789 typename HalModel = typename HalPolicy::Model>
790ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
791 uint32_t inputIndex,
792 const HalModel& model,
793 const ConversionData& data,
794 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
795 const armnn::TensorShape* overrideTensorShape = nullptr,
796 bool optional = false)
797{
798 using HalOperand = typename HalPolicy::Operand;
799
800 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
801 if (!operand)
802 {
803 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
804 return ConstTensorPin();
805 }
806 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
807 model,
808 data,
809 dimensionMappings,
810 overrideTensorShape,
811 optional);
812}
813
814template<typename HalPolicy,
815 typename OutputType,
816 typename HalOperandType = typename HalPolicy::OperandType,
817 typename HalOperation = typename HalPolicy::Operation,
818 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100819bool GetInputScalar(const HalOperation& operation,
820 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100821 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100822 OutputType& outValue,
823 const HalModel& model,
824 const ConversionData& data)
825{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100826 using HalOperand = typename HalPolicy::Operand;
827
828 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100829 if (!operand)
830 {
831 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
832 }
833
834 if (operand->type != type)
835 {
836 return Fail("%s: unexpected operand type: %s (should be %s)",
837 __func__, toString(operand->type).c_str(), toString(type).c_str());
838 }
839
840 if (operand->location.length != sizeof(OutputType))
841 {
842 return Fail("%s: incorrect operand location length: %i (should be %i)",
843 __func__, operand->location.length, sizeof(OutputType));
844 }
845
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100846 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100847 if (!valueAddress)
848 {
849 return Fail("%s: failed to get address for operand", __func__);
850 }
851
852 outValue = *(static_cast<const OutputType*>(valueAddress));
853 return true;
854}
855
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100856template<typename HalPolicy,
857 typename HalOperation = typename HalPolicy::Operation,
858 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100859bool GetInputInt32(const HalOperation& operation,
860 uint32_t inputIndex,
861 int32_t& outValue,
862 const HalModel& model,
863 const ConversionData& data)
864{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100865 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100866}
867
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100868template<typename HalPolicy,
869 typename HalOperation = typename HalPolicy::Operation,
870 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100871bool GetInputFloat32(const HalOperation& operation,
872 uint32_t inputIndex,
873 float& outValue,
874 const HalModel& model,
875 const ConversionData& data)
876{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100877 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100878}
879
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100880template<typename HalPolicy,
881 typename HalOperation = typename HalPolicy::Operation,
882 typename HalOperandType = typename HalPolicy::OperandType,
883 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100884bool GetInputActivationFunctionImpl(const HalOperation& operation,
885 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100886 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100887 ActivationFn& outActivationFunction,
888 const HalModel& model,
889 const ConversionData& data)
890{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100891 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100892 {
893 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
894 __func__,
895 toString(type).c_str(),
896 toString(OperandType::INT32).c_str(),
897 toString(OperandType::TENSOR_INT32).c_str());
898 }
899
900 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100901 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100902 {
903 return Fail("%s: failed to get activation input value", __func__);
904 }
905 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
906 return true;
907}
908
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100909template<typename HalPolicy,
910 typename HalOperation = typename HalPolicy::Operation,
911 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100912bool GetInputActivationFunction(const HalOperation& operation,
913 uint32_t inputIndex,
914 ActivationFn& outActivationFunction,
915 const HalModel& model,
916 const ConversionData& data)
917{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100918 return GetInputActivationFunctionImpl<HalPolicy>(operation,
919 inputIndex,
920 HalPolicy::OperandType::INT32,
921 outActivationFunction,
922 model,
923 data);
arovir01b0717b52018-09-05 17:03:25 +0100924}
925
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100926template<typename HalPolicy,
927 typename HalOperation = typename HalPolicy::Operation,
928 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100929bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
930 uint32_t inputIndex,
931 ActivationFn& outActivationFunction,
932 const HalModel& model,
933 const ConversionData& data)
934{
935 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100936 return GetInputActivationFunctionImpl<HalPolicy>(operation,
937 inputIndex,
938 HalPolicy::OperandType::INT32,
939 outActivationFunction,
940 model,
941 data);
arovir01b0717b52018-09-05 17:03:25 +0100942}
943
944
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100945template<typename HalPolicy,
946 typename HalOperation = typename HalPolicy::Operation,
947 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100948bool GetOptionalInputActivation(const HalOperation& operation,
949 uint32_t inputIndex,
950 ActivationFn& activationFunction,
951 const HalModel& model,
952 const ConversionData& data)
953{
954 if (operation.inputs.size() <= inputIndex)
955 {
956 activationFunction = ActivationFn::kActivationNone;
957 }
958 else
959 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100960 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100961 {
962 return Fail("%s: Operation has invalid inputs", __func__);
963 }
964 }
965 return true;
966}
967
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100968template<typename HalPolicy,
969 typename ConvolutionDescriptor,
970 typename HalOperation = typename HalPolicy::Operation,
971 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100972bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
973 uint32_t dilationXIndex,
974 ConvolutionDescriptor& descriptor,
975 const HalModel& model,
976 const ConversionData& data)
977{
978 bool success = true;
979 if (operation.inputs.size() >= dilationXIndex + 2)
980 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100981 success &= GetInputScalar<HalPolicy>(operation,
982 dilationXIndex,
983 HalPolicy::OperandType::INT32,
984 descriptor.m_DilationX,
985 model,
986 data);
987 success &= GetInputScalar<HalPolicy>(operation,
988 dilationXIndex + 1,
989 HalPolicy::OperandType::INT32,
990 descriptor.m_DilationY,
991 model,
992 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100993 }
994
995 return success;
996}
997
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100998template<typename HalPolicy,
999 typename HalOperand = typename HalPolicy::Operand,
1000 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001001bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +01001002 std::vector<int32_t>& outValues,
1003 const HalModel& model,
1004 const ConversionData& data)
1005{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001006 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +01001007 {
1008 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1009 }
1010
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001011 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001012 if (!startAddress)
1013 {
1014 return Fail("%s: failed to get operand address", __func__, operand.type);
1015 }
1016
1017 // Check number of bytes is sensible
1018 const uint32_t numBytes = operand.location.length;
1019 if (numBytes % sizeof(int32_t) != 0)
1020 {
1021 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1022 __func__, numBytes, sizeof(int32_t));
1023 }
1024
1025 outValues.resize(numBytes / sizeof(int32_t));
1026 memcpy(outValues.data(), startAddress, numBytes);
1027 return true;
1028}
1029
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001030template<typename HalPolicy,
1031 typename HalOperation = typename HalPolicy::Operation,
1032 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001033bool GetInputPaddingScheme(const HalOperation& operation,
1034 uint32_t inputIndex,
1035 PaddingScheme& outPaddingScheme,
1036 const HalModel& model,
1037 const ConversionData& data)
1038{
1039 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001040 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001041 {
1042 return Fail("%s: failed to get padding scheme input value", __func__);
1043 }
1044
1045 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1046 return true;
1047}
1048
// Resolves the operation input at inputIndex to a LayerInputHandle, creating
// an ArmNN Constant layer on the fly for constant operands. Returns an
// invalid (default) handle on any failure; errors are reported through Fail().
// Dynamic input tensors are rejected.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        // How the handle is produced depends on where the operand's data
        // lives (model input, intermediate value, or constant).
        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // Only add the Constant layer if at least one backend can
                    // actually execute it.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1161
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001162template<typename HalPolicy,
1163 typename HalOperation = typename HalPolicy::Operation,
1164 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001165bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1166 uint32_t operationOutputIndex,
1167 armnn::IConnectableLayer& layer,
1168 uint32_t layerOutputIndex,
1169 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001170 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001171{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001172 using HalOperand = typename HalPolicy::Operand;
1173
1174 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001175 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1176 {
1177 return false;
1178 }
1179
1180 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1181
1182 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1183 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1184
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001185 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001186
1187 return true;
1188}
1189
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001190template<typename HalPolicy,
1191 typename HalOperation = typename HalPolicy::Operation,
1192 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001193armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1194 uint32_t inputIndex,
1195 const HalModel& model,
1196 ConversionData& data)
1197{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001198 using HalOperand = typename HalPolicy::Operand;
1199
1200 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001201 if (!operand)
1202 {
1203 return armnn::DataLayout::NHWC;
1204 }
1205
1206 if (!IsBool(*operand))
1207 {
1208 return armnn::DataLayout::NHWC;
1209 }
1210
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001211 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001212 if (!valueAddress)
1213 {
1214 return armnn::DataLayout::NHWC;
1215 }
1216
1217 if (*(static_cast<const bool*>(valueAddress)))
1218 {
1219 return armnn::DataLayout::NCHW;
1220 }
1221 else
1222 {
1223 return armnn::DataLayout::NHWC;
1224 }
1225}
1226
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001227template<typename HalPolicy,
1228 typename HalOperation = typename HalPolicy::Operation,
1229 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001230bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1231 uint32_t outputIndex,
1232 armnn::IConnectableLayer& layer,
1233 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001234 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001235{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001236 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1237 outputIndex,
1238 layer,
1239 outputIndex,
1240 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001241 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001242}
1243
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001244template<typename HalPolicy,
1245 typename HalOperation = typename HalPolicy::Operation,
1246 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001247bool ConvertToActivation(const HalOperation& operation,
1248 const char* operationName,
1249 const armnn::ActivationDescriptor& activationDesc,
1250 const HalModel& model,
1251 ConversionData& data)
1252{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001253 using HalOperand = typename HalPolicy::Operand;
1254
1255 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001256 if (!input.IsValid())
1257 {
1258 return Fail("%s: Input 0 is invalid", operationName);
1259 }
1260
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001261 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001262 if (!outputOperand)
1263 {
1264 return false;
1265 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001266
1267 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Sadik Armagan2050c232019-07-23 16:59:58 +01001268 if (IsDynamicTensor(outInfo))
1269 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001270 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan2050c232019-07-23 16:59:58 +01001271 }
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001272
1273 bool isSupported = false;
1274 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1275 IsActivationSupported,
1276 data.m_Backends,
1277 isSupported,
1278 input.GetTensorInfo(),
1279 outInfo,
1280 activationDesc);
1281 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001282 {
1283 return false;
1284 }
1285
1286 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1287 BOOST_ASSERT(layer != nullptr);
1288 input.Connect(layer->GetInputSlot(0));
1289
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001290 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001291}
1292
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001293template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001294 typename HalOperation = typename HalPolicy::Operation,
1295 typename HalModel = typename HalPolicy::Model>
1296bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1297{
1298 armnn::ActivationDescriptor desc;
1299 desc.m_Function = armnn::ActivationFunction::ReLu;
1300
1301 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1302}
1303
1304template<typename HalPolicy,
1305 typename HalOperation = typename HalPolicy::Operation,
1306 typename HalModel = typename HalPolicy::Model>
1307bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1308{
1309 armnn::ActivationDescriptor desc;
1310 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1311 desc.m_A = 1.0f;
1312 desc.m_B = -1.0f;
1313
1314 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1315}
1316
1317template<typename HalPolicy,
1318 typename HalOperation = typename HalPolicy::Operation,
1319 typename HalModel = typename HalPolicy::Model>
1320bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1321{
1322 armnn::ActivationDescriptor desc;
1323 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1324 desc.m_A = 6.0f;
1325
1326 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1327}
1328
1329template<typename HalPolicy,
1330 typename HalOperation = typename HalPolicy::Operation,
1331 typename HalModel = typename HalPolicy::Model>
1332bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1333{
1334 armnn::ActivationDescriptor desc;
1335 desc.m_Function = armnn::ActivationFunction::TanH;
1336 desc.m_A = 1.0f; // android nn does not support tanH parameters
1337 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1338
1339 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1340}
1341
1342template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001343 typename HalOperation = typename HalPolicy::Operation,
1344 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001345bool ConvertPaddings(const HalOperation& operation,
1346 const HalModel& model,
1347 ConversionData& data,
1348 unsigned int rank,
1349 armnn::PadDescriptor& padDescriptor)
1350{
1351 using HalOperand = typename HalPolicy::Operand;
1352
1353 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1354 if (!paddingsOperand)
1355 {
1356 return Fail("%s: Could not read paddings operand", __func__);
1357 }
1358
1359 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1360 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1361 {
1362 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1363 }
1364
1365 std::vector<int32_t> paddings;
1366 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1367
1368 // add padding for each dimension of input tensor.
1369 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1370 {
1371 int paddingBeforeInput = paddings[i];
1372 int paddingAfterInput = paddings[i + 1];
1373
1374 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1375 {
1376 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1377 }
1378
1379 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1380 }
1381
1382 return true;
1383}
1384
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts a 2D pooling operation (max/average/L2, selected by poolType) into
// an ArmNN Pooling2d layer followed by an optional fused activation.
// Supports both NNAPI signatures: explicit padding (>= 10 inputs) and
// implicit padding scheme (7 inputs); an optional trailing BOOL selects NCHW.
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    // Default layout; may be overridden below by the optional BOOL input.
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    // Explicit-padding signature.
    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 models may carry an optional data-layout flag at index 10.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // Implicit-padding signature: padding is computed from the scheme.
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 models may carry an optional data-layout flag at index 7.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }

        // Derive the explicit padding values from the scheme and the input's
        // spatial dimensions (layout-aware indexing).
        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
        const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }

    // Check backend support before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer is the last layer whose
    // output corresponds to the operation's output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1503
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001504template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001505 typename HalOperation = typename HalPolicy::Operation,
1506 typename HalModel = typename HalPolicy::Model>
1507bool ConvertAdd(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01001508{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001509 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01001510
1511 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1512 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1513
1514 if (!input0.IsValid() || !input1.IsValid())
1515 {
1516 return Fail("%s: Operation has invalid inputs", __func__);
1517 }
1518
1519 // The FuseActivation parameter is always the input index 2
1520 // and it should be optional
1521 ActivationFn activationFunction;
1522 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1523 {
1524 return Fail("%s: Operation has invalid inputs", __func__);
1525 }
1526
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001527 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01001528 if (!outputOperand)
1529 {
1530 return false;
1531 }
1532
1533 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1534 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
1535
1536 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1537 if (IsDynamicTensor(outputInfo))
1538 {
1539 return Fail("%s: Dynamic output tensors are not supported", __func__);
1540 }
1541
1542 bool isSupported = false;
1543 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1544 IsAdditionSupported,
1545 data.m_Backends,
1546 isSupported,
1547 inputInfo0,
1548 inputInfo1,
1549 outputInfo);
1550 if (!isSupported)
1551 {
1552 return false;
1553 }
1554
1555 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
1556 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
1557
1558 if (endLayer != nullptr)
1559 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01001560 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
1561 if (!isReshapeSupported)
1562 {
1563 return false;
1564 }
1565
Mike Kelly46272802019-08-14 17:00:48 +01001566 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
1567 }
1568 else
1569 {
1570 return Fail("%s: ProcessActivation failed", __func__);
1571 }
1572}
1573
1574template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001575 typename HalOperation = typename HalPolicy::Operation,
1576 typename HalModel = typename HalPolicy::Model>
1577bool ConvertArgMinMax(const HalOperation& operation,
1578 const HalModel& model,
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001579 ConversionData& data,
1580 armnn::ArgMinMaxFunction argMinMaxFunction)
1581{
1582 ALOGV("argMinMaxFunction = %s", GetArgMinMaxFunctionAsCString(argMinMaxFunction));
1583
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00001584 using HalOperand = typename HalPolicy::Operand;
Francis Murtagh19fa0cc2019-11-19 12:06:47 +00001585 using HalOperandType = typename HalPolicy::OperandType;
1586
1587 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1588
1589 if (!input0.IsValid())
1590 {
1591 return Fail("%s: Operation has invalid inputs", __func__);
1592 }
1593
1594 int32_t axis;
1595 if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, axis, model, data))
1596 {
1597 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
1598 }
1599
1600 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
1601 int rank = static_cast<int>(inputInfo.GetNumDimensions());
1602
1603 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
1604 {
1605 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
1606 // E.g. Rank 4 tensor can have axis in range [-4, 3)
1607 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
1608 return Fail("%s: Axis must be in range [-n, n)", __func__);
1609 }
1610
1611 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1612 if (!output)
1613 {
1614 return Fail("%s: Could not read output 0", __func__);
1615 }
1616
1617 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
1618
1619 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1620 if (IsDynamicTensor(outputInfo))
1621 {
1622 return Fail("%s: Dynamic output tensors are not supported", __func__);
1623 }
1624
1625 armnn::ArgMinMaxDescriptor descriptor;
1626 descriptor.m_Function = argMinMaxFunction;
1627 descriptor.m_Axis = axis;
1628
1629 bool isSupported = false;
1630 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1631 IsArgMinMaxSupported,
1632 data.m_Backends,
1633 isSupported,
1634 inputInfo0,
1635 outputInfo,
1636 descriptor);
1637 if (!isSupported)
1638 {
1639 return false;
1640 }
1641
1642 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
1643 assert(layer != nullptr);
1644
1645 input0.Connect(layer->GetInputSlot(0));
1646
1647 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1648}
1649
// Converts an ANEURALNETWORKS_CONCATENATION operation into an ArmNN Concat layer.
// The first N-1 inputs are the tensors to concatenate; the last input is the
// concatenation axis (INT32 scalar, may be negative, TensorFlow-style).
// Inputs of rank 1/2 are first reshaped up to rank 3, and a permutation is
// inserted around the concat when the axis requires it; both transformations
// are undone on the output so callers see the original layout.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConcatenation(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    // The axis scalar lives at index numInputTensors (the last input).
    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo  outputInfo      = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape     = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input was padded up to rank 3, and by how many
    // dimensions, so the concat axis and output shape can be adjusted to match
    // and the extra dimensions stripped again at the end.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
                );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (std::exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // The backend support query takes the inputs as TensorInfo pointers.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
            *data.m_Network,
            layer->GetOutputSlot(0),
            afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1887
// Converts an ANEURALNETWORKS_CONV_2D operation into an ArmNN Convolution2d
// layer (NHWC), with the fused activation appended after it.
// Two operand layouts are accepted:
//   10 inputs: explicit padding (left/right/top/bottom), strides, activation;
//    7 inputs: implicit padding scheme, strides, activation - padding is then
//              computed from the input and kernel sizes via CalcPadding.
// Weights (input 1) and bias (input 2) must be constant tensors.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    // Rescale the bias quantization to match input scale * weight scale,
    // as required for quantized convolutions.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit-padding form: operands 3-8 are pad l/r/t/b and strides x/y.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit-padding form: operand 3 is the padding scheme (SAME/VALID).
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights indexed as [ O, H, W, I ]; input is NHWC, so H/W are dims 1/2.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Backend support check happens before any layer is added to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2007
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01002008template<typename HalPolicy,
2009 typename HalOperation = typename HalPolicy::Operation,
2010 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01002011bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
2012{
2013 using HalOperand = typename HalPolicy::Operand;
2014 using HalOperandType = typename HalPolicy::OperandType;
2015
2016 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2017 if (!input.IsValid() )
2018 {
2019 return Fail("%s: Operation has invalid inputs", __func__);
2020 }
2021
2022 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2023 unsigned int rank = inputInfo.GetNumDimensions();
2024 if (rank != 4)
2025 {
2026 return Fail("%s: Only inputs with rank 4 are supported", __func__);
2027 }
2028
2029 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2030 if (!output)
2031 {
2032 return Fail("%s: Could not read output 0", __func__);
2033 }
2034
2035 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2036 if (IsDynamicTensor(outputInfo))
2037 {
2038 return Fail("%s: Dynamic output tensors are not supported", __func__);
2039 }
2040
2041 armnn::DepthToSpaceDescriptor descriptor;
2042
2043 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
2044 if (descriptor.m_BlockSize <= 1)
2045 {
2046 return Fail("%s: Block size must be at least 1 in all dimensions");
2047 }
2048
2049 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2050 if (Is12Operand(*output))
2051 {
2052 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
2053 }
2054
2055 bool isSupported = false;
2056 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2057 IsDepthToSpaceSupported,
2058 data.m_Backends,
2059 isSupported,
2060 inputInfo,
2061 outputInfo,
2062 descriptor);
2063 if (!isSupported)
2064 {
2065 return false;
2066 }
2067
2068 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
2069 assert(layer != nullptr);
2070 input.Connect(layer->GetInputSlot(0));
2071
2072 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2073}
2074
// Converts an ANEURALNETWORKS_DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer (NHWC), with the fused activation appended.
// Two operand layouts are accepted:
//   11 inputs: explicit padding, strides, depth multiplier (index 9, implied
//              by the weight shape here), activation at index 10;
//    8 inputs: implicit padding scheme, strides, depth multiplier, activation
//              at index 7 - padding is computed via CalcPadding.
// AndroidNN supplies the weights as [ 1, H, W, I * M ]; they are re-shaped and
// permuted into ArmNN's [ M, I, H, W ] layout before being pinned as constants.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // (I is taken from the input's channel dim; M = (I * M) / I.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Rescale the bias quantization to match input scale * weight scale,
    // as required for quantized convolutions.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit-padding form: operands 3-8 are pad l/r/t/b and strides x/y.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit-padding form: operand 3 is the padding scheme (SAME/VALID).
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are now [ M, I, H, W ] (see swizzle above), so H/W are dims 2/3;
        // the NHWC input has H/W at dims 1/2.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Backend support check happens before any layer is added to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2219
Mike Kelly3c673942019-07-25 09:26:06 +01002220template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002221 typename HalOperation = typename HalPolicy::Operation,
2222 typename HalModel = typename HalPolicy::Model>
2223bool ConvertDequantize(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002224{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002225 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002226
2227 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2228 if (!input.IsValid())
2229 {
2230 return Fail("%s: Operation has invalid input", __func__);
2231 }
2232
Sadik Armagan98c0f662019-11-21 15:54:36 +00002233 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2234 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
2235 if (quantizationDim.has_value() && quantizationDim.value() != 0)
2236 {
2237 return Fail("%s: Operation has quantization dimension different than 0", __func__);
2238 }
2239
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002240 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002241 if (!outputOperand)
2242 {
2243 return Fail("%s: Operation has invalid outputs", __func__);
2244 }
2245
2246 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2247 if (IsDynamicTensor(outputInfo))
2248 {
2249 return Fail("%s: Dynamic output tensors are not supported", __func__);
2250 }
2251
2252 bool isSupported = false;
2253 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2254 IsDequantizeSupported,
2255 data.m_Backends,
2256 isSupported,
Sadik Armagan98c0f662019-11-21 15:54:36 +00002257 inputInfo,
2258 outputInfo);
Mike Kelly46272802019-08-14 17:00:48 +01002259 if (!isSupported)
2260 {
2261 return false;
2262 }
2263
2264 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2265 assert(layer != nullptr);
2266 input.Connect(layer->GetInputSlot(0));
2267
2268 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2269}
2270
2271template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002272 typename HalOperation = typename HalPolicy::Operation,
2273 typename HalModel = typename HalPolicy::Model>
2274bool ConvertDiv(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002275{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002276 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002277
2278 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2279 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2280
2281 if (!input0.IsValid() || !input1.IsValid())
2282 {
2283 return Fail("%s: Operation has invalid inputs", __func__);
2284 }
2285
2286 // The FuseActivation parameter is always the input index 2
2287 // and it should be optional
2288 ActivationFn activationFunction;
2289 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2290 {
2291 return Fail("%s: Operation has invalid inputs", __func__);
2292 }
2293
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002294 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002295 if (!output)
2296 {
2297 return Fail("%s: Could not read output 0", __func__);
2298 }
2299
2300 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2301 if (IsDynamicTensor(outputInfo))
2302 {
2303 return Fail("%s: Dynamic output tensors are not supported", __func__);
2304 }
2305
2306 bool isSupported = false;
2307 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2308 IsDivisionSupported,
2309 data.m_Backends,
2310 isSupported,
2311 input0.GetTensorInfo(),
2312 input1.GetTensorInfo(),
2313 outputInfo);
2314 if (!isSupported)
2315 {
2316 return false;
2317 }
2318
2319 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2320 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2321
2322 if (endLayer)
2323 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002324 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2325 if (!isReshapeSupported)
2326 {
2327 return false;
2328 }
2329
Mike Kelly46272802019-08-14 17:00:48 +01002330 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2331 }
2332 return Fail("%s: ProcessActivation failed", __func__);
2333}
2334
2335template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002336 typename HalOperation = typename HalPolicy::Operation,
2337 typename HalModel = typename HalPolicy::Model>
2338bool ConvertFloor(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002339{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002340 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002341
2342 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2343 if (!input.IsValid())
2344 {
2345 return Fail("%s: Operation has invalid inputs", __func__);
2346 }
2347
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002348 const HalOperand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002349 if (!outputOperand)
2350 {
2351 return Fail("%s: Operation has invalid outputs", __func__);
2352 }
2353
2354 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2355 if (IsDynamicTensor(outputInfo))
2356 {
2357 return Fail("%s: Dynamic output tensors are not supported", __func__);
2358 }
2359
2360 bool isSupported = false;
2361 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2362 IsFloorSupported,
2363 data.m_Backends,
2364 isSupported,
2365 input.GetTensorInfo(),
2366 outputInfo);
2367 if (!isSupported)
2368 {
2369 return false;
2370 }
2371
2372 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2373 assert(layer != nullptr);
2374 input.Connect(layer->GetInputSlot(0));
2375
2376 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2377}
2378
// Fallback for HAL 1.0 operands: TENSOR_QUANT8_SYMM does not exist in the 1.0
// operand type enum, so a V1_0 operand can never be QSymm8.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}
2383
#ifdef ARMNN_ANDROID_NN_V1_2

// HAL 1.2 overload: reports whether the operand holds symmetric 8-bit
// quantized data (TENSOR_QUANT8_SYMM), the only type DequantizeIfRequired
// below knows how to dequantize manually.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2392
// Outcome of DequantizeIfRequired() below.
enum class DequantizeStatus
{
    SUCCESS,         // Weights were manually dequantized into a new float buffer.
    NOT_REQUIRED,    // Weights are already constant (or no DEQUANTIZE producer found).
    INVALID_OPERAND  // The weights operand could not be read from the model.
};
2399
// (dequantized float buffer, buffer size in bytes, tensor info for the buffer, status)
using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
2401
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002402template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002403 typename HalOperation = typename HalPolicy::Operation,
2404 typename HalModel = typename HalPolicy::Model>
2405DequantizeResult DequantizeIfRequired(size_t operand_index,
2406 const HalOperation& operation,
2407 const HalModel& model,
2408 const ConversionData& data)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002409{
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002410 using HalOperand = typename HalPolicy::Operand;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002411
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002412 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, operand_index, model);
Sadik Armagand0811942019-11-18 17:11:21 +00002413 if (!weightsOperand)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002414 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002415 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
Sadik Armagand0811942019-11-18 17:11:21 +00002416 }
2417
2418 if (IsOperandConstant<HalPolicy>(*weightsOperand))
2419 {
2420 // Weights are already constant
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002421 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002422 }
2423
2424 const size_t weightsInputIndex = operation.inputs[operand_index];
2425
2426 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2427 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
2428 for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); ++operationIdx)
2429 {
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002430 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002431 const auto& operationIt = model.operations[operationIdx];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002432 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2433 {
2434 continue;
2435 }
2436
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002437 size_t outOpIndex = weightsInputIndex + 1;
2438 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002439 {
2440 outOpIndex = operationIt.outputs[i];
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002441 }
2442
2443 if (outOpIndex != weightsInputIndex)
2444 {
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002445 continue;
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002446 }
2447
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002448 const HalOperand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002449 BOOST_ASSERT(operand);
2450
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002451 if (!IsQSymm8(*operand))
2452 {
2453 // Only supporting dequantize from QSYMM8 to FLOAT
2454 break;
2455 }
2456
2457 // Allocate a new buffer for the dequantized data and manually dequantize
2458 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2459 if (!startValue)
2460 {
2461 // Failed to get the operand address
2462 break;
2463 }
2464
2465 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2466 size_t dequantizedBufferLength = operand->location.length;
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002467 const float quantizationScale = operand->scale;
2468
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002469 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2470 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2471 {
2472 float* dstPtr = dequantizedBuffer.get();
2473 BOOST_ASSERT(dstPtr);
2474 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2475 }
2476
Aron Virginas-Tar65a1b1d2019-11-15 15:59:51 +00002477 // Construct tensor info for dequantized ConstTensor
2478 armnn::TensorInfo tensorInfo(operand->dimensions.size(),
2479 operand->dimensions.data(),
2480 armnn::DataType::Float32);
2481
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002482 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
2483 std::move(tensorInfo),
2484 DequantizeStatus::SUCCESS };
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002485 }
2486
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002487 return { nullptr, 0, armnn::TensorInfo() , DequantizeStatus::NOT_REQUIRED};
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002488}
2489
2490template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002491 typename HalOperation = typename HalPolicy::Operation,
2492 typename HalModel = typename HalPolicy::Model>
2493ConstTensorPin DequantizeAndMakeConstTensorPin(const HalOperation& operation,
2494 const HalModel& model,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002495 const ConversionData& data,
2496 size_t operandIndex,
2497 bool optional = false)
2498{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002499 DequantizeResult dequantized = DequantizeIfRequired<HalPolicy>(operandIndex,operation, model, data);
2500
2501 DequantizeStatus status = std::get<3>(dequantized);
2502 switch (status)
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002503 {
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002504 case DequantizeStatus::INVALID_OPERAND:
2505 {
2506 // return invalid const tensor pin
2507 return ConstTensorPin();
2508 }
2509 case DequantizeStatus::NOT_REQUIRED:
2510 {
2511 return ConvertOperationInputToConstTensorPin<HalPolicy>(
2512 operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
2513 }
2514 case DequantizeStatus::SUCCESS:
2515 default:
2516 {
2517 return ConstTensorPin(
2518 std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
2519 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002520 }
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002521}
2522
2523
// Converts an Android NN FULLY_CONNECTED operation into an ArmNN FullyConnected
// layer (plus an optional fused activation). Inputs: 0 = data tensor,
// 1 = weights (possibly needing manual dequantization), 2 = 1-D bias,
// 3 = fused-activation scalar. Returns false (via Fail) on any invalid input
// or unsupported configuration.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertFullyConnected(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights may be the output of a DEQUANTIZE op; DequantizeAndMakeConstTensorPin
    // handles that case by dequantizing them into a constant tensor.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy>(operation, model, data, 1);
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // FullyConnected expects a 2-D input; compute the flattened shape from the
    // input and weights shapes (throws on incompatible shapes).
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Support is queried with the flattened (2-D) input shape.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        // Inputs of rank > 2 are flattened via an explicit Reshape layer placed
        // between the input and the FullyConnected layer.
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2634
2635template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002636 typename HalOperation = typename HalPolicy::Operation,
2637 typename HalModel = typename HalPolicy::Model>
2638bool ConvertL2Normalization(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002639{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002640 using HalOperand = typename HalPolicy::Operand;
2641
Mike Kelly999e2092019-08-15 10:46:46 +01002642 if (operation.inputs.size() != 1)
2643 {
2644 return Fail("%s: Optional inputs are not supported", __func__);
2645 }
2646
Mike Kelly46272802019-08-14 17:00:48 +01002647 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2648 if (!input.IsValid())
2649 {
2650 return Fail("%s: Operation has invalid inputs", __func__);
2651 }
2652
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002653 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002654 if (!output)
2655 {
2656 return Fail("%s: Could not read output 0", __func__);
2657 }
2658
2659 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2660 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2661
2662 if (IsDynamicTensor(outputInfo))
2663 {
2664 return Fail("%s: Dynamic output tensors are not supported", __func__);
2665 }
2666 if (outputInfo.GetNumDimensions() != 4u)
2667 {
2668 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2669 }
2670
2671 armnn::L2NormalizationDescriptor desc;
2672 desc.m_DataLayout = armnn::DataLayout::NHWC;
2673
2674 bool isSupported = false;
2675 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2676 IsL2NormalizationSupported,
2677 data.m_Backends,
2678 isSupported,
2679 inputInfo,
2680 outputInfo,
2681 desc);
2682 if (!isSupported)
2683 {
2684 return false;
2685 }
2686
2687 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2688 assert(layer != nullptr);
2689 input.Connect(layer->GetInputSlot(0));
2690
2691 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2692}
2693
2694template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002695 typename HalOperation = typename HalPolicy::Operation,
2696 typename HalModel = typename HalPolicy::Model>
2697bool ConvertLocalResponseNormalization(const HalOperation& operation,
2698 const HalModel& model,
Mike Kelly46272802019-08-14 17:00:48 +01002699 ConversionData& data)
2700{
Mike Kelly999e2092019-08-15 10:46:46 +01002701 if (operation.inputs.size() != 5)
2702 {
2703 return Fail("%s: Optional inputs are not supported", __func__);
2704 }
2705
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002706 using HalOperand = typename HalPolicy::Operand;
2707 using HalOperandType = typename HalPolicy::OperandType;
Mike Kelly46272802019-08-14 17:00:48 +01002708
2709 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2710 if (!input.IsValid())
2711 {
2712 return Fail("%s: Operation has invalid inputs", __func__);
2713 }
2714
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002715 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002716 if (!output)
2717 {
2718 return Fail("%s: Could not read output 0", __func__);
2719 }
2720
2721 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2722 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2723
2724 if (IsDynamicTensor(outputInfo))
2725 {
2726 return Fail("%s: Dynamic output tensors are not supported", __func__);
2727 }
2728 if (outputInfo.GetNumDimensions() != 4u)
2729 {
2730 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2731 }
2732
2733 armnn::NormalizationDescriptor descriptor;
2734 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2735 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2736 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2737
2738 if (!input.IsValid() ||
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002739 !GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_NormSize, model, data) ||
Mike Kelly46272802019-08-14 17:00:48 +01002740 !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
2741 !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
2742 !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
2743 {
2744 return Fail("%s: Operation has invalid inputs", __func__);
2745 }
2746
2747 // ArmNN expects normSize to be the full size of the normalization
2748 // window rather than the radius as in AndroidNN.
2749 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
2750
2751 bool isSupported = false;
2752 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2753 IsNormalizationSupported,
2754 data.m_Backends,
2755 isSupported,
2756 inputInfo,
2757 outputInfo,
2758 descriptor);
2759 if (!isSupported)
2760 {
2761 return false;
2762 }
2763
2764
2765 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
2766 assert(layer != nullptr);
2767 input.Connect(layer->GetInputSlot(0));
2768
2769 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2770}
2771
2772template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002773 typename HalOperation = typename HalPolicy::Operation,
2774 typename HalModel = typename HalPolicy::Model>
2775bool ConvertLogistic(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002776{
Mike Kelly46272802019-08-14 17:00:48 +01002777 armnn::ActivationDescriptor desc;
2778 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2779
2780 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2781}
2782
// Converts an Android NN MEAN operation into an ArmNN Mean layer.
// Inputs: 0 = tensor, 1 = axis tensor (INT32 values, may be negative),
// 2 = keep-dims flag (INT32, nonzero means keep reduced dimensions).
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertMean(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank maps negative axis values into [0, rank).
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2859
2860template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002861 typename HalOperation = typename HalPolicy::Operation,
2862 typename HalModel = typename HalPolicy::Model>
2863bool ConvertMul(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002864{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002865 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002866
2867 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2868 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2869
2870 if (!input0.IsValid() || !input1.IsValid())
2871 {
2872 return Fail("%s: Operation has invalid inputs", __func__);
2873 }
2874
2875 // The FuseActivation parameter is always the input index 2
2876 // and it should be optional
2877 ActivationFn activationFunction;
2878 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2879 {
2880 return Fail("%s: Operation has invalid inputs", __func__);
2881 }
2882
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002883 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01002884
2885 if (outputOperand == nullptr)
2886 {
2887 return false;
2888 }
2889
2890 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2891 if (IsDynamicTensor(outputInfo))
2892 {
2893 return Fail("%s: Dynamic output tensors are not supported", __func__);
2894 }
2895
2896 bool isSupported = false;
2897 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2898 IsMultiplicationSupported,
2899 data.m_Backends,
2900 isSupported,
2901 input0.GetTensorInfo(),
2902 input1.GetTensorInfo(),
2903 outputInfo);
2904 if (!isSupported)
2905 {
2906 return false;
2907 }
2908
2909 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2910 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2911
2912 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2913 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2914
2915 if (endLayer != nullptr)
2916 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002917 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2918 if (!isReshapeSupported)
2919 {
2920 return false;
2921 }
2922
Mike Kelly46272802019-08-14 17:00:48 +01002923 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2924 }
2925 else
2926 {
2927 return Fail("%s: ProcessActivation failed", __func__);
2928 }
2929}
2930
2931template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002932 typename HalOperation = typename HalPolicy::Operation,
2933 typename HalModel = typename HalPolicy::Model>
2934bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002935{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002936 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01002937
Mike Kelly3c673942019-07-25 09:26:06 +01002938 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2939 if (!input.IsValid())
2940 {
2941 return Fail("%s: Operation has invalid inputs", __func__);
2942 }
2943
2944 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2945 unsigned int rank = inputInfo.GetNumDimensions();
2946
2947 armnn::PadDescriptor descriptor;
2948 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
2949 {
2950 return Fail("%s: Could not convert paddings", __func__);
2951 }
2952
2953 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
2954 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
2955 // (QuantizationOffset - QuantizationOffset) * scale = 0.
2956 if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
2957 {
2958 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
2959 }
2960
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002961 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly3c673942019-07-25 09:26:06 +01002962 if (!output)
2963 {
2964 return Fail("%s: Could not read output", __func__);
2965 }
2966
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002967 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01002968 if (IsDynamicTensor(outputInfo))
2969 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002970 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01002971 }
2972
2973 bool isSupported = false;
2974 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2975 IsPadSupported,
2976 data.m_Backends,
2977 isSupported,
2978 inputInfo,
2979 outputInfo,
2980 descriptor);
2981 if (!isSupported)
2982 {
2983 return false;
2984 }
2985
2986 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
2987 assert(layer != nullptr);
2988 input.Connect(layer->GetInputSlot(0));
2989 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2990
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01002991 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01002992}
2993
Mike Kelly0a879362019-07-29 16:56:31 +01002994template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002995 typename HalOperation = typename HalPolicy::Operation,
2996 typename HalModel = typename HalPolicy::Model>
2997bool ConvertReshape(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01002998{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00002999 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003000
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003001 const HalOperand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
3002 const HalOperand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3003 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003004
3005 if (inputOperand == nullptr
3006 || requestedShapeOperand == nullptr
3007 || outputOperand == nullptr)
3008 {
3009 return Fail("%s: Operation has invalid inputs", __func__);
3010 }
3011
3012 if (requestedShapeOperand->dimensions.size() != 1)
3013 {
3014 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
3015 __func__, requestedShapeOperand->dimensions.size());
3016 }
3017
3018 std::vector<int32_t> targetDimensions;
3019 if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
3020 {
3021 return Fail("%s: Could not read values of input 1", __func__);
3022 }
3023
3024 const Shape inputOperandShape = GetOperandShape(*inputOperand);
3025
3026 Shape requestedShape;
3027 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
3028 // function that resolves these values into a fully specified tensor shape.
3029 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
3030 {
3031 return Fail("%s: Failed to resolve the requested shape", __func__);
3032 }
3033
3034 const Shape outputOperandShape = GetOperandShape(*outputOperand);
3035 if (!SameShape(requestedShape, outputOperandShape))
3036 {
3037 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
3038 }
3039
3040 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3041 if (!input.IsValid())
3042 {
3043 return Fail("%s: Could not read input 0", __func__);
3044 }
3045
3046 armnn::ReshapeDescriptor reshapeDescriptor;
3047 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
3048 requestedShape.dimensions.data());
3049
3050 bool isSupported = false;
3051 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3052 IsReshapeSupported,
3053 data.m_Backends,
3054 isSupported,
3055 input.GetTensorInfo(),
3056 reshapeDescriptor);
3057 if (!isSupported)
3058 {
3059 return false;
3060 }
3061
3062 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
3063 assert(layer != nullptr);
3064 input.Connect(layer->GetInputSlot(0));
3065
3066 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3067}
3068
3069template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003070 typename HalOperation = typename HalPolicy::Operation,
3071 typename HalModel = typename HalPolicy::Model>
3072bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly0a879362019-07-29 16:56:31 +01003073{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003074 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003075
Mike Kelly0a879362019-07-29 16:56:31 +01003076 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3077 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3078
3079 if (!input0.IsValid() || !input1.IsValid())
3080 {
3081 return Fail("%s: Operation has invalid inputs", __func__);
3082 }
3083
3084 // The FuseActivation parameter is always the input index 2
3085 // and it should be optional
3086 ActivationFn activationFunction;
3087 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3088 {
3089 return Fail("%s: Operation has invalid inputs", __func__);
3090 }
3091
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003092 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly0a879362019-07-29 16:56:31 +01003093 if (!output)
3094 {
3095 return Fail("%s: Could not read output 0", __func__);
3096 }
3097
3098 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3099 if (IsDynamicTensor(outputInfo))
3100 {
3101 return Fail("%s: Dynamic output tensors are not supported", __func__);
3102 }
3103
3104 bool isSupported = false;
3105 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3106 IsSubtractionSupported,
3107 data.m_Backends,
3108 isSupported,
3109 input0.GetTensorInfo(),
3110 input1.GetTensorInfo(),
3111 outputInfo);
3112 if (!isSupported)
3113 {
3114 return false;
3115 }
3116
3117 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3118 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3119
3120 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3121 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3122
3123 if (endLayer)
3124 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01003125 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3126 if (!isReshapeSupported)
3127 {
3128 return false;
3129 }
Mike Kelly0a879362019-07-29 16:56:31 +01003130 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3131 }
3132
3133 return Fail("%s: ProcessActivation failed", __func__);
3134}
3135
Finn Williams23b87b32019-07-30 11:44:05 +01003136template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003137 typename HalOperation = typename HalPolicy::Operation,
3138 typename HalModel = typename HalPolicy::Model>
3139bool ConvertSqueeze(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003140{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003141 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003142
3143 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3144 if (!input.IsValid())
3145 {
3146 return Fail("%s: Operation has invalid inputs", __func__);
3147 }
3148
3149 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3150 unsigned int rank = inputInfo.GetNumDimensions();
3151 if (rank > 4)
3152 {
3153 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3154 }
3155
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003156 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003157 if (!output)
3158 {
3159 return Fail("%s: Could not read output 0", __func__);
3160 }
3161
3162 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3163 {
3164 return Fail("%s: Dynamic output tensors are not supported", __func__);
3165 }
3166
3167 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3168 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003169 const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003170
3171 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3172
3173 std::vector<int32_t> axis;
3174 if (!axisOperand)
3175 {
3176 axis.assign(dimensionSequence,
3177 dimensionSequence + rank);
3178 }
3179 else
3180 {
3181 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
3182 }
3183
3184 std::vector<uint32_t> outputDims;
3185 for (unsigned int i = 0; i < rank; i++)
3186 {
3187 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3188 auto currentDimension = inputInfo.GetShape()[i];
3189 if (skipSqueeze || currentDimension != 1)
3190 {
3191 outputDims.push_back(currentDimension);
3192 }
3193 }
3194
3195 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3196
3197 armnn::TensorInfo outputInfo = inputInfo;
3198 outputInfo.SetShape(outShape);
3199
3200 armnn::ReshapeDescriptor reshapeDesc;
3201 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3202
3203 bool isSupported = false;
3204 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3205 IsReshapeSupported,
3206 data.m_Backends,
3207 isSupported,
3208 inputInfo,
3209 reshapeDesc);
3210 if (!isSupported)
3211 {
3212 return false;
3213 }
3214
3215 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3216 assert(layer != nullptr);
3217 input.Connect(layer->GetInputSlot(0));
3218
3219 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3220}
3221
3222template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003223 typename HalOperation = typename HalPolicy::Operation,
3224 typename HalModel = typename HalPolicy::Model>
3225bool ConvertStridedSlice(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003226{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003227 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003228
3229 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3230 if (!input.IsValid())
3231 {
3232 return Fail("%s: Operation has invalid inputs", __func__);
3233 }
3234
3235 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3236 unsigned int rank = inputInfo.GetNumDimensions();
3237 if (rank > 4)
3238 {
3239 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3240 }
3241
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003242 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003243 if (!output)
3244 {
3245 return Fail("%s: Could not read output 0", __func__);
3246 }
3247
3248 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3249 if (IsDynamicTensor(outputInfo))
3250 {
3251 return Fail("%s: Dynamic output tensors are not supported", __func__);
3252 }
3253
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003254 const HalOperand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3255 const HalOperand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3256 const HalOperand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
Mike Kelly46272802019-08-14 17:00:48 +01003257
3258 std::vector<int32_t> beginValues;
3259 std::vector<int32_t> endValues;
3260 std::vector<int32_t> stridesValues;
3261
3262 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003263 auto ValidateInputOperands = [&] (const HalOperand& operand, std::vector<int32_t>& operandValues)
Mike Kelly46272802019-08-14 17:00:48 +01003264 {
3265 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3266 {
3267 return false;
3268 }
3269
3270 if (operandValues.size() != rank)
3271 {
3272 return false;
3273 }
3274
3275 return true;
3276 };
3277
3278 if (!ValidateInputOperands(*beginOperand, beginValues)
3279 || !ValidateInputOperands(*endOperand, endValues)
3280 || !ValidateInputOperands(*stridesOperand, stridesValues))
3281 {
3282 return Fail("%s: Operation has invalid input operand", __func__);
3283 }
3284
3285 // Stride cannot have value '0'
3286 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3287 {
3288 return Fail("%s: Stride must be non-zero value.", __func__);
3289 }
3290
3291 armnn::StridedSliceDescriptor descriptor;
3292 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3293 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3294 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3295 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3296
3297 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3298 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3299 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3300 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3301 {
3302 return Fail("%s: Operation has invalid inputs", __func__);
3303 }
3304
3305 bool isSupported = false;
3306 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3307 IsStridedSliceSupported,
3308 data.m_Backends,
3309 isSupported,
3310 inputInfo,
3311 outputInfo,
3312 descriptor);
3313 if (!isSupported)
3314 {
3315 return false;
3316 }
3317
3318 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3319 assert(layer != nullptr);
3320 input.Connect(layer->GetInputSlot(0));
3321
3322 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3323}
3324
3325template<typename HalPolicy,
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003326 typename HalOperation = typename HalPolicy::Operation,
3327 typename HalModel = typename HalPolicy::Model>
3328bool ConvertTranspose(const HalOperation& operation, const HalModel& model, ConversionData& data)
Mike Kelly46272802019-08-14 17:00:48 +01003329{
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003330 using HalOperand = typename HalPolicy::Operand;
Mike Kelly46272802019-08-14 17:00:48 +01003331
3332 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3333 if (!input.IsValid())
3334 {
3335 return Fail("%s: Operation has invalid inputs", __func__);
3336 }
3337
3338 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3339 unsigned int rank = inputInfo.GetNumDimensions();
3340 if (rank > 4)
3341 {
3342 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3343 }
3344
3345 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3346 // if the operand index is out of bounds.
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003347 const HalOperand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
Mike Kelly46272802019-08-14 17:00:48 +01003348
3349 std::vector<int32_t> perm(rank);
3350 if (!permOperand)
3351 {
3352 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3353 for (unsigned int i = rank; i > 0; i--)
3354 {
3355 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3356 }
3357 }
3358 else
3359 {
3360 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
3361 }
3362
3363 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3364
Mike Kelly46272802019-08-14 17:00:48 +01003365 armnn::PermuteDescriptor permuteDesc;
Matthew Benthamc4aacb32019-11-27 16:55:05 +00003366 permuteDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
Mike Kelly46272802019-08-14 17:00:48 +01003367
Aron Virginas-Taraa5df2d2019-11-19 12:49:55 +00003368 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kelly46272802019-08-14 17:00:48 +01003369 if (!output)
3370 {
3371 return Fail("%s: Could not read output 0", __func__);
3372 }
3373
3374 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Matthew Bentham0182fd32019-12-06 09:45:13 +00003375 if (IsDynamicTensor(outputInfo))
3376 {
3377 return Fail("%s: Dynamic output tensors are not supported", __func__);
3378 }
3379
Mike Kelly46272802019-08-14 17:00:48 +01003380
3381 bool isSupported = false;
3382 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3383 IsPermuteSupported,
3384 data.m_Backends,
3385 isSupported,
3386 inputInfo,
3387 outputInfo,
3388 permuteDesc);
3389 if (!isSupported)
3390 {
3391 return false;
3392 }
3393
3394 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3395 assert(layer != nullptr);
3396 input.Connect(layer->GetInputSlot(0));
3397
3398 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3399}
3400
3401template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003402 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003403 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003404 typename HalModel = typename HalPolicy::Model>
3405bool ConvertBatchToSpaceNd(const HalOperation& operation,
3406 const HalModel& model,
3407 ConversionData& data)
3408{
Finn Williams23b87b32019-07-30 11:44:05 +01003409
3410 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3411 if (!input.IsValid())
3412 {
3413 return Fail("%s: Operation has invalid inputs", __func__);
3414 }
3415
3416 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3417 if (!output)
3418 {
3419 return Fail("%s: Could not read output 0", __func__);
3420 }
3421
3422 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3423 if (IsDynamicTensor(outputInfo))
3424 {
3425 return Fail("%s: Dynamic output tensors are not supported", __func__);
3426 }
3427
3428 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3429 if (!blockOperand)
3430 {
3431 return Fail("%s: Could not read input 1", __func__);
3432 }
3433
3434 // Convert the block operand to int32
3435 std::vector<int32_t> block;
3436 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3437 {
3438 return Fail("%s: Input 1 has invalid values", __func__);
3439 }
3440
3441 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3442
3443 unsigned int rank = inputInfo.GetNumDimensions();
3444 if (rank != 4)
3445 {
3446 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3447 }
3448
3449 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3450 {
3451 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3452 " greater than or equal to 1", __func__);
3453 }
3454
3455 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3456 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3457 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3458
3459 if (Is12Operand(*output))
3460 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003461 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003462 }
3463 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3464 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3465
3466 bool isSupported = false;
3467 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3468 IsBatchToSpaceNdSupported,
3469 data.m_Backends,
3470 isSupported,
3471 inputInfo,
3472 outputInfo,
3473 batchToSpaceNdDesc);
3474 if (!isSupported)
3475 {
3476 return false;
3477 }
3478
3479 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3480 assert(layer != nullptr);
3481 input.Connect(layer->GetInputSlot(0));
3482
3483 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3484}
Mike Kelly0a879362019-07-29 16:56:31 +01003485
Finn Williamsd74c5052019-07-30 17:06:00 +01003486template<typename HalPolicy,
3487 typename HalOperation = typename HalPolicy::Operation,
3488 typename HalOperand = typename HalPolicy::Operand,
3489 typename HalModel = typename HalPolicy::Model>
3490bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3491{
3492 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3493 if (!input.IsValid())
3494 {
3495 return Fail("%s: Operation has invalid inputs", __func__);
3496 }
3497
3498 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3499 unsigned int rank = inputInfo.GetNumDimensions();
3500 unsigned int spatialDim = rank - 2;
3501
3502 if (rank != 4)
3503 {
3504 Fail("%s: Only inputs with rank 4 are supported", __func__);
3505 }
3506
3507 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3508 if (!output)
3509 {
3510 return Fail("%s: Could not read output 0", __func__);
3511 }
3512
3513 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3514 if (IsDynamicTensor(outputInfo))
3515 {
3516 return Fail("%s: Dynamic output tensors are not supported", __func__);
3517 }
3518
3519 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3520 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3521
3522 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3523 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3524 {
3525 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3526 }
3527
3528 std::vector<int32_t> blockShape;
3529 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3530 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3531 {
3532 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3533 }
3534
3535 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3536 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3537 {
3538 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3539 }
3540
3541 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3542 std::vector<int32_t> paddings;
3543 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3544 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3545 {
3546 int paddingBeforeInput = paddings[i];
3547 int paddingAfterInput = paddings[i + 1];
3548 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3549 {
3550 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3551 }
3552
3553 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3554 }
3555
3556 armnn::SpaceToBatchNdDescriptor descriptor;
3557 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3558 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3559 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3560
3561 if (Is12Operand(*output))
3562 {
3563 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3564 }
3565
3566 bool isSupported = false;
3567 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3568 IsSpaceToBatchNdSupported,
3569 data.m_Backends,
3570 isSupported,
3571 inputInfo,
3572 outputInfo,
3573 descriptor);
3574 if (!isSupported)
3575 {
3576 return false;
3577 }
3578
3579 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3580 assert(layer != nullptr);
3581 input.Connect(layer->GetInputSlot(0));
3582
3583 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3584}
3585
Kevin May407718f2019-09-09 14:46:41 +01003586template<typename HalPolicy,
3587 typename HalOperation = typename HalPolicy::Operation,
3588 typename HalModel = typename HalPolicy::Model>
3589bool ConvertAbs(const HalOperation& operation, const HalModel& model, ConversionData& data)
3590{
3591 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3592
3593 if (!input.IsValid())
3594 {
3595 return Fail("%s: Operation has invalid input", __func__);
3596 }
3597
3598 using HalOperand = typename HalPolicy::Operand;
3599 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3600 if (!output)
3601 {
3602 return Fail("%s: Could not read output 0", __func__);
3603 }
3604
3605 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3606 if (IsDynamicTensor(outputInfo))
3607 {
3608 return Fail("%s: Dynamic output tensors are not supported", __func__);
3609 }
3610
3611 bool isSupported = false;
3612 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3613 IsAbsSupported,
3614 data.m_Backends,
3615 isSupported,
3616 input.GetTensorInfo(),
3617 outputInfo);
3618
3619 if (!isSupported)
3620 {
3621 return false;
3622 }
3623
3624 armnn::IConnectableLayer* const layer = data.m_Network->AddAbsLayer();
3625 assert(layer != nullptr);
3626 input.Connect(layer->GetInputSlot(0));
3627
3628 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3629}
3630
3631
saoste01b8471482018-10-10 09:44:51 +01003632} // namespace armnn_driver