blob: 88c153756cad10083cea402997dcf30b1b3497c0 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Mike Kellyb5fdf382019-06-11 16:35:25 +010014#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010015#include "armnn/src/armnnUtils/Permute.hpp"
arovir01b0717b52018-09-05 17:03:25 +010016
Mike Kelly46272802019-08-14 17:00:48 +010017#include "1.0/FullyConnected.hpp"
18
arovir01b0717b52018-09-05 17:03:25 +010019#include <ActivationFunctor.h>
20#include <CpuExecutor.h>
21#include <OperationsUtils.h>
22
23#include <boost/assert.hpp>
24#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010025#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010026#include <boost/test/tools/floating_point_comparison.hpp>
27
28#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010029#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010030
31namespace armnn_driver
32{
33
34///
35/// Helper classes
36///
37
// Shared state threaded through every operation-conversion helper while a
// single HAL model is being translated into an armnn::INetwork.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    // Backends to query for layer support (iteration order defines preference).
    const std::vector<armnn::BackendId> m_Backends;
    // The network under construction; starts out null until conversion begins.
    armnn::INetworkPtr m_Network;
    // Output slot producing each operand; presumably indexed by operand index — confirm with callers.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operand data; must stay alive for the conversion.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
50
// Lightweight handle pairing an upstream output slot with its tensor info,
// used to wire operands into newly created layers.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when this handle wraps a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
68
// Wraps an armnn::ConstTensor whose backing storage is either borrowed from a
// model memory pool or owned locally (when the data had to be permuted).
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation to apply to the data; non-identity mappings cause the data to be
    //                   copied into m_SwizzledTensorData.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Copying is forbidden (would duplicate or alias the owned buffer); moving is allowed.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
103
104} // namespace armnn_driver
105
106///
107/// Utility functions
108///
109
110namespace
111{
112
113using namespace armnn_driver;
114using namespace android::nn;
115
// Convenience function to log the reason for failing to convert a model.
// The format string and arguments are forwarded verbatim to ALOGD.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
124
// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
//
// Iterates 'backends' in order; 'supported' is set by each backend's ILayerSupport::func call and the
// loop stops at the first backend that accepts the layer. Unregistered backends and per-backend
// rejection reasons are logged via ALOGD. An InvalidArgumentException thrown by the support check is
// rethrown with extra context. (No comments inside the body: line continuations forbid them.)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
try \
{ \
    for (auto&& backendId : backends) \
    { \
        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
        if (layerSupportObject) \
        { \
            std::string reasonIfUnsupported; \
            supported = \
                layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
            if (supported) \
            { \
                break; \
            } \
            else \
            { \
                if (reasonIfUnsupported.size() > 0) \
                { \
                    ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                } \
                else \
                { \
                    ALOGD("%s: not supported by armnn", funcName); \
                } \
            } \
        } \
        else \
        { \
            ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
        } \
    } \
    if (!supported) \
    { \
        ALOGD("%s: not supported by any specified backend", funcName); \
    } \
} \
catch (const armnn::InvalidArgumentException &e) \
{ \
    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
}
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100168
// Builds an armnn::TensorShape directly from a HAL operand's dimensions array.
template<typename Operand>
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
174
Matthew Bentham912b3622019-05-03 15:49:14 +0100175inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100176{
Matthew Bentham912b3622019-05-03 15:49:14 +0100177 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
178 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
179 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100180}
181
#ifdef ARMNN_ANDROID_NN_V1_2

// Returns true for the V1_2 operand types this driver can convert to ArmNN tensors.
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
197
// V1_0 has no BOOL operand type, so a V1_0 operand can never be a bool.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
202
// A V1_0 operand is by definition not a HAL 1.2 operand.
inline bool Is12Operand(V1_0::Operand)
{
    return false;
}
207
#ifdef ARMNN_ANDROID_NN_V1_2

// True when the V1_2 operand's type is BOOL.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks whether an operand is a HAL 1.2 Operand (trivially true for this overload).
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif
222
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100223template<typename LayerHandleType>
224armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
225 armnn::TensorInfo reshapeInfo)
226{
227 armnn::ReshapeDescriptor reshapeDescriptor;
228 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
229
230 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
231 BOOST_ASSERT(reshapeLayer != nullptr);
232
233 // Attach the input layer to the reshape layer
234 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
235 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
236
237 return *reshapeLayer;
238}
239
// Connects two inputs to 'startLayer', inserting a reshape in front of the lower-rank
// input (left-padding its shape with 1s) so both inputs have the same rank.
// Returns false only if no backend supports the required reshape.
bool BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer, ConversionData& data)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return true;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Left-pad the smaller shape with degenerate (size 1) dimensions, e.g. [3,4] -> [1,1,3,4].
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });

    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
    armnn::ReshapeDescriptor reshapeDescriptor;

    // Verify at least one backend supports the reshape before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    BOOST_ASSERT(data.m_Network != nullptr);
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }

    return true;
}
329
330void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
331 android::nn::PaddingScheme scheme)
332{
333 int32_t padHead;
334 int32_t padTail;
335 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
336 outPadHead = boost::numeric_cast<uint32_t>(padHead);
337 outPadTail = boost::numeric_cast<uint32_t>(padTail);
338}
339
#ifdef ARMNN_ANDROID_NN_V1_2

// Dilation-aware overload: delegates to the NNAPI helper that accounts for dilation,
// then converts the signed padding values to the unsigned out-parameters.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

// Transpose-convolution padding is derived from the output size. The out-parameters are
// signed, presumably because transpose-conv padding can be negative — confirm with the
// NNAPI calculateExplicitPaddingTransposeConv contract.
void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, uint32_t stride, int32_t& outPadHead,
                              int32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
}

#endif
359
Matthew Bentham912b3622019-05-03 15:49:14 +0100360Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100361{
362 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100363 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100364 shape.dimensions = operand.dimensions;
365 shape.scale = operand.scale;
366 shape.offset = operand.zeroPoint;
367 return shape;
368}
369
#ifdef ARMNN_ANDROID_NN_V1_2

// Translates a V1_2 HAL operand's metadata into the NNAPI Shape structure.
Shape GetOperandShape(const V1_2::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    // NNAPI calls the quantization offset "zeroPoint"; Shape calls it "offset".
    shape.offset = operand.zeroPoint;
    return shape;
}

#endif
383
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo,
                                   const armnn::TensorInfo& inputInfo)
{
    if (weightInfo.HasPerAxisQuantization())
    {
        // NOTE: Bias scale is always set to 0 for per-axis quantization and
        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
        {
            return biasScale * inputInfo.GetQuantizationScale();
        };

        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);

        biasInfo.SetQuantizationScales(biasScales);
        // The bias is quantized along the same axis as the weights.
        biasInfo.SetQuantizationDim(weightInfo.GetQuantizationDim());

        ALOGV("Bias quantization params have been updated for per-axis quantization");
    }
    else
    {
        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
        {
            // Only patch the scale when it is within 1% of the expected value; larger
            // mismatches are left as-is for ArmNN to reject downstream.
            boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
            if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
            {
                ALOGW("Bias quantization scale has been modified to match input * weights");
                biasInfo.SetQuantizationScale(expectedBiasScale);
            }
        }
    }
}
423
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// NHWCToArmNN and ArmNNToNHWC are inverse mappings of each other (used by the
// Swizzle/Deswizzle helpers below).
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
// RotateTensorLeft/RotateTensorRight are inverse 3D rotations, paired up in
// CreateConcatPermutationParameters.
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
434
435template<typename OSlot>
436armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
437 const armnn::PermutationVector& mappings)
438{
439 // Add swizzle layer
440 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
441
442 BOOST_ASSERT(layer != nullptr);
443
444 // Connect input to swizzle layer
445 input.Connect(layer->GetInputSlot(0));
446
447 // Setup swizzled output
448 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
449 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
450
451 return *layer;
452}
453
454void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
455{
456 // Add swizzle layer
457 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
458 // Connect swizzled input to layer
459 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
460}
461
462armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
463{
464 // Add deswizzle layer
465 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
466 return deswizzleLayer;
467}
468
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
// Swizzles the input of 'firstLayer' and deswizzles the output of 'lastLayer';
// returns the trailing deswizzle layer.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
478
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
// Single-layer convenience overload: swizzle and deswizzle around the same layer.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
485
486bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
487 const armnn::TensorShape & outputShape,
488 uint32_t concatDim)
489{
490 // Validate the output shape is correct given the input shapes (which have just been validated)
491 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
492 if (outputShape.GetNumDimensions() != numDimensions)
493 {
494 return Fail("%s: Output shape has wrong number of dimensions", __func__);
495 }
496
497 unsigned int outputSizeAlongConcatenatedDimension = 0;
498 for (unsigned int i = 0; i < inputShapes.size(); i++)
499 {
500 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
501 }
502
503 for (unsigned int i = 0; i < numDimensions; ++i)
504 {
505 if (i == concatDim)
506 {
507 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
508 {
509 return Fail(
510 "%s: Invalid output shape for dimension %d (%d != %d)",
511 __func__,
512 i,
513 outputShape[i],
514 outputSizeAlongConcatenatedDimension);
515 }
516 }
517 else
518 {
519 if (outputShape[i] != inputShapes[0][i])
520 {
521 return Fail("%s: Invalid output shape", __func__);
522 }
523 }
524 }
525
526 return true;
527}
528
529bool RequiresReshape(armnn::TensorShape & inputShape)
530{
531 return inputShape.GetNumDimensions() < 3;
532}
533
arovir01b0717b52018-09-05 17:03:25 +0100534void SwizzleInputs(armnn::INetwork& network,
535 std::vector<LayerInputHandle>& inputs,
536 std::vector<armnn::TensorShape>& inputShapes,
537 const armnn::PermutationVector& mapping)
538{
539 if (!mapping.IsEqual(IdentityPermutation4D))
540 {
541 size_t nInputs = inputs.size();
542 for (size_t i=0; i<nInputs; ++i)
543 {
544 // add swizzle layer
545 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
546 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
547 auto& outputInfo = outputSlot.GetTensorInfo();
548 // replace inputs with the swizzled ones
549 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
550 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
551 }
552 }
553}
554
narpra01f176d5a2018-11-18 20:17:48 +0000555bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
556 int32_t & concatDimension,
557 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100558{
narpra01f176d5a2018-11-18 20:17:48 +0000559 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100560 BOOST_ASSERT(numberOfDimensions >= 3);
561
562 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000563 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
564 // or along dimension 0 or 2 for a 3-D tensor.
565 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100566 {
narpra01f176d5a2018-11-18 20:17:48 +0000567 concatDimension = 1;
568 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
569 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100570 }
narpra01f176d5a2018-11-18 20:17:48 +0000571 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100572 {
narpra01f176d5a2018-11-18 20:17:48 +0000573 concatDimension = 0;
574 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
575 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100576 }
narpra01f176d5a2018-11-18 20:17:48 +0000577 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100578}
579
580} // anonymous namespace
581
582namespace armnn_driver
583{
584
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
595
596} // namespace armnn_driver
597
598///
599/// Utility templates
600///
601
602namespace armnn_driver
603{
604
605using namespace android::nn;
606
// Looks up the operand feeding input 'inputIndex' of 'operation'.
// @return The operand, or nullptr if the index is out of range (logged unless
//         failOnIndexOutOfBounds is false, e.g. for probing optional inputs).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu match uint32_t and size_t; %i with a size_t argument is undefined behaviour.
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size());
    return &model.operands[operation.inputs[inputIndex]];
}
628
// Looks up the operand produced by output 'outputIndex' of 'operation'.
// @return The operand, or nullptr (with a logged reason) if the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %u/%zu match uint32_t and size_t; %i with a size_t argument is undefined behaviour.
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
648
// Resolves the read-only address of an operand's data according to its lifetime:
// either inside model.operandValues or inside one of the conversion memory pools.
// @return The data address, or nullptr for a missing-but-optional operand or any
//         unsupported lifetime (the latter is logged as a failure).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A mandatory operand with NO_VALUE is treated like any other invalid lifetime.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
695
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100696template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100697 typename HalOperation = typename HalPolicy::Operation,
698 typename HalModel = typename HalPolicy::Model,
699 typename HalOperandType = typename HalPolicy::OperandType>
700bool GetOperandType(const HalOperation& operation,
701 uint32_t inputIndex,
702 const HalModel& model,
703 HalOperandType& type)
704{
705 using HalOperand = typename HalPolicy::Operand;
706
707 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
708 if (!operand)
709 {
710 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
711 }
712
713 type = operand->type;
714 return true;
715}
716
// Converts a constant HAL operand into a ConstTensorPin, optionally permuting the
// data and/or overriding the tensor shape. Returns an invalid pin on failure; a
// missing optional operand yields an invalid pin flagged as optional (not an error).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Only constant lifetimes can be pinned; NO_VALUE is allowed through here so the
    // optional-handling below can classify it.
    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
764
765template<typename HalPolicy,
766 typename HalOperation = typename HalPolicy::Operation,
767 typename HalModel = typename HalPolicy::Model>
768ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
769 uint32_t inputIndex,
770 const HalModel& model,
771 const ConversionData& data,
772 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
773 const armnn::TensorShape* overrideTensorShape = nullptr,
774 bool optional = false)
775{
776 using HalOperand = typename HalPolicy::Operand;
777
778 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
779 if (!operand)
780 {
781 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
782 return ConstTensorPin();
783 }
784 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
785 model,
786 data,
787 dimensionMappings,
788 overrideTensorShape,
789 optional);
790}
791
792template<typename HalPolicy,
793 typename OutputType,
794 typename HalOperandType = typename HalPolicy::OperandType,
795 typename HalOperation = typename HalPolicy::Operation,
796 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100797bool GetInputScalar(const HalOperation& operation,
798 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100799 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100800 OutputType& outValue,
801 const HalModel& model,
802 const ConversionData& data)
803{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100804 using HalOperand = typename HalPolicy::Operand;
805
806 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100807 if (!operand)
808 {
809 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
810 }
811
812 if (operand->type != type)
813 {
814 return Fail("%s: unexpected operand type: %s (should be %s)",
815 __func__, toString(operand->type).c_str(), toString(type).c_str());
816 }
817
818 if (operand->location.length != sizeof(OutputType))
819 {
820 return Fail("%s: incorrect operand location length: %i (should be %i)",
821 __func__, operand->location.length, sizeof(OutputType));
822 }
823
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100824 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100825 if (!valueAddress)
826 {
827 return Fail("%s: failed to get address for operand", __func__);
828 }
829
830 outValue = *(static_cast<const OutputType*>(valueAddress));
831 return true;
832}
833
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100834template<typename HalPolicy,
835 typename HalOperation = typename HalPolicy::Operation,
836 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100837bool GetInputInt32(const HalOperation& operation,
838 uint32_t inputIndex,
839 int32_t& outValue,
840 const HalModel& model,
841 const ConversionData& data)
842{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100843 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100844}
845
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100846template<typename HalPolicy,
847 typename HalOperation = typename HalPolicy::Operation,
848 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100849bool GetInputFloat32(const HalOperation& operation,
850 uint32_t inputIndex,
851 float& outValue,
852 const HalModel& model,
853 const ConversionData& data)
854{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100855 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100856}
857
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100858template<typename HalPolicy,
859 typename HalOperation = typename HalPolicy::Operation,
860 typename HalOperandType = typename HalPolicy::OperandType,
861 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100862bool GetInputActivationFunctionImpl(const HalOperation& operation,
863 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100864 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100865 ActivationFn& outActivationFunction,
866 const HalModel& model,
867 const ConversionData& data)
868{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100869 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100870 {
871 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
872 __func__,
873 toString(type).c_str(),
874 toString(OperandType::INT32).c_str(),
875 toString(OperandType::TENSOR_INT32).c_str());
876 }
877
878 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100879 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100880 {
881 return Fail("%s: failed to get activation input value", __func__);
882 }
883 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
884 return true;
885}
886
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100887template<typename HalPolicy,
888 typename HalOperation = typename HalPolicy::Operation,
889 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100890bool GetInputActivationFunction(const HalOperation& operation,
891 uint32_t inputIndex,
892 ActivationFn& outActivationFunction,
893 const HalModel& model,
894 const ConversionData& data)
895{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100896 return GetInputActivationFunctionImpl<HalPolicy>(operation,
897 inputIndex,
898 HalPolicy::OperandType::INT32,
899 outActivationFunction,
900 model,
901 data);
arovir01b0717b52018-09-05 17:03:25 +0100902}
903
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100904template<typename HalPolicy,
905 typename HalOperation = typename HalPolicy::Operation,
906 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100907bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
908 uint32_t inputIndex,
909 ActivationFn& outActivationFunction,
910 const HalModel& model,
911 const ConversionData& data)
912{
913 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100914 return GetInputActivationFunctionImpl<HalPolicy>(operation,
915 inputIndex,
916 HalPolicy::OperandType::INT32,
917 outActivationFunction,
918 model,
919 data);
arovir01b0717b52018-09-05 17:03:25 +0100920}
921
922
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100923template<typename HalPolicy,
924 typename HalOperation = typename HalPolicy::Operation,
925 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100926bool GetOptionalInputActivation(const HalOperation& operation,
927 uint32_t inputIndex,
928 ActivationFn& activationFunction,
929 const HalModel& model,
930 const ConversionData& data)
931{
932 if (operation.inputs.size() <= inputIndex)
933 {
934 activationFunction = ActivationFn::kActivationNone;
935 }
936 else
937 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100938 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100939 {
940 return Fail("%s: Operation has invalid inputs", __func__);
941 }
942 }
943 return true;
944}
945
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100946template<typename HalPolicy,
947 typename ConvolutionDescriptor,
948 typename HalOperation = typename HalPolicy::Operation,
949 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100950bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
951 uint32_t dilationXIndex,
952 ConvolutionDescriptor& descriptor,
953 const HalModel& model,
954 const ConversionData& data)
955{
956 bool success = true;
957 if (operation.inputs.size() >= dilationXIndex + 2)
958 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100959 success &= GetInputScalar<HalPolicy>(operation,
960 dilationXIndex,
961 HalPolicy::OperandType::INT32,
962 descriptor.m_DilationX,
963 model,
964 data);
965 success &= GetInputScalar<HalPolicy>(operation,
966 dilationXIndex + 1,
967 HalPolicy::OperandType::INT32,
968 descriptor.m_DilationY,
969 model,
970 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100971 }
972
973 return success;
974}
975
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100976template<typename HalPolicy,
977 typename HalOperand = typename HalPolicy::Operand,
978 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100979bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100980 std::vector<int32_t>& outValues,
981 const HalModel& model,
982 const ConversionData& data)
983{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100984 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100985 {
986 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
987 }
988
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100989 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100990 if (!startAddress)
991 {
992 return Fail("%s: failed to get operand address", __func__, operand.type);
993 }
994
995 // Check number of bytes is sensible
996 const uint32_t numBytes = operand.location.length;
997 if (numBytes % sizeof(int32_t) != 0)
998 {
999 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1000 __func__, numBytes, sizeof(int32_t));
1001 }
1002
1003 outValues.resize(numBytes / sizeof(int32_t));
1004 memcpy(outValues.data(), startAddress, numBytes);
1005 return true;
1006}
1007
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001008template<typename HalPolicy,
1009 typename HalOperation = typename HalPolicy::Operation,
1010 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001011bool GetInputPaddingScheme(const HalOperation& operation,
1012 uint32_t inputIndex,
1013 PaddingScheme& outPaddingScheme,
1014 const HalModel& model,
1015 const ConversionData& data)
1016{
1017 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001018 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001019 {
1020 return Fail("%s: failed to get padding scheme input value", __func__);
1021 }
1022
1023 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1024 return true;
1025}
1026
/// Resolves the inputIndex-th input of @p operation to a LayerInputHandle wrapping the ArmNN
/// output slot (existing or newly created) that produces that tensor.
/// Handling depends on the operand's lifetime:
///  - MODEL_INPUT: checked for backend support, then falls through to the slot lookup;
///  - TEMPORARY_VARIABLE / MODEL_OUTPUT: looked up in data.m_OutputSlotForOperand;
///  - CONSTANT_COPY / CONSTANT_REFERENCE: materialised as a new ArmNN Constant layer.
/// Returns a default-constructed (invalid) LayerInputHandle on any failure.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; handled by the catch below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The constant must also be supported by at least one backend.
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    // Materialise the constant as a layer and hand back its single output slot.
                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        // Thrown by GetTensorInfoForOperand for operand types ArmNN cannot represent.
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1139
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001140template<typename HalPolicy,
1141 typename HalOperation = typename HalPolicy::Operation,
1142 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001143bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1144 uint32_t operationOutputIndex,
1145 armnn::IConnectableLayer& layer,
1146 uint32_t layerOutputIndex,
1147 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001148 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001149{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001150 using HalOperand = typename HalPolicy::Operand;
1151
1152 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001153 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1154 {
1155 return false;
1156 }
1157
1158 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1159
1160 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1161 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1162
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001163 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001164
1165 return true;
1166}
1167
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001168template<typename HalPolicy,
1169 typename HalOperation = typename HalPolicy::Operation,
1170 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001171armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1172 uint32_t inputIndex,
1173 const HalModel& model,
1174 ConversionData& data)
1175{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001176 using HalOperand = typename HalPolicy::Operand;
1177
1178 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001179 if (!operand)
1180 {
1181 return armnn::DataLayout::NHWC;
1182 }
1183
1184 if (!IsBool(*operand))
1185 {
1186 return armnn::DataLayout::NHWC;
1187 }
1188
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001189 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001190 if (!valueAddress)
1191 {
1192 return armnn::DataLayout::NHWC;
1193 }
1194
1195 if (*(static_cast<const bool*>(valueAddress)))
1196 {
1197 return armnn::DataLayout::NCHW;
1198 }
1199 else
1200 {
1201 return armnn::DataLayout::NHWC;
1202 }
1203}
1204
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001205template<typename HalPolicy,
1206 typename HalOperation = typename HalPolicy::Operation,
1207 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001208bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1209 uint32_t outputIndex,
1210 armnn::IConnectableLayer& layer,
1211 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001212 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001213{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001214 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1215 outputIndex,
1216 layer,
1217 outputIndex,
1218 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001219 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001220}
1221
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001222template<typename HalPolicy,
1223 typename HalOperation = typename HalPolicy::Operation,
1224 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001225bool ConvertToActivation(const HalOperation& operation,
1226 const char* operationName,
1227 const armnn::ActivationDescriptor& activationDesc,
1228 const HalModel& model,
1229 ConversionData& data)
1230{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001231 using HalOperand = typename HalPolicy::Operand;
1232
1233 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001234 if (!input.IsValid())
1235 {
1236 return Fail("%s: Input 0 is invalid", operationName);
1237 }
1238
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001239 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001240 if (!outputOperand)
1241 {
1242 return false;
1243 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001244
1245 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Sadik Armagan2050c232019-07-23 16:59:58 +01001246 if (IsDynamicTensor(outInfo))
1247 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001248 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan2050c232019-07-23 16:59:58 +01001249 }
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001250
1251 bool isSupported = false;
1252 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1253 IsActivationSupported,
1254 data.m_Backends,
1255 isSupported,
1256 input.GetTensorInfo(),
1257 outInfo,
1258 activationDesc);
1259 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001260 {
1261 return false;
1262 }
1263
1264 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1265 BOOST_ASSERT(layer != nullptr);
1266 input.Connect(layer->GetInputSlot(0));
1267
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001268 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001269}
1270
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001271template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001272 typename HalOperation = typename HalPolicy::Operation,
1273 typename HalModel = typename HalPolicy::Model>
1274bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1275{
1276 armnn::ActivationDescriptor desc;
1277 desc.m_Function = armnn::ActivationFunction::ReLu;
1278
1279 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1280}
1281
1282template<typename HalPolicy,
1283 typename HalOperation = typename HalPolicy::Operation,
1284 typename HalModel = typename HalPolicy::Model>
1285bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1286{
1287 armnn::ActivationDescriptor desc;
1288 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1289 desc.m_A = 1.0f;
1290 desc.m_B = -1.0f;
1291
1292 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1293}
1294
1295template<typename HalPolicy,
1296 typename HalOperation = typename HalPolicy::Operation,
1297 typename HalModel = typename HalPolicy::Model>
1298bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1299{
1300 armnn::ActivationDescriptor desc;
1301 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1302 desc.m_A = 6.0f;
1303
1304 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1305}
1306
1307template<typename HalPolicy,
1308 typename HalOperation = typename HalPolicy::Operation,
1309 typename HalModel = typename HalPolicy::Model>
1310bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1311{
1312 armnn::ActivationDescriptor desc;
1313 desc.m_Function = armnn::ActivationFunction::TanH;
1314 desc.m_A = 1.0f; // android nn does not support tanH parameters
1315 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1316
1317 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1318}
1319
1320template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001321 typename HalOperation = typename HalPolicy::Operation,
1322 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001323bool ConvertPaddings(const HalOperation& operation,
1324 const HalModel& model,
1325 ConversionData& data,
1326 unsigned int rank,
1327 armnn::PadDescriptor& padDescriptor)
1328{
1329 using HalOperand = typename HalPolicy::Operand;
1330
1331 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1332 if (!paddingsOperand)
1333 {
1334 return Fail("%s: Could not read paddings operand", __func__);
1335 }
1336
1337 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1338 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1339 {
1340 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1341 }
1342
1343 std::vector<int32_t> paddings;
1344 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1345
1346 // add padding for each dimension of input tensor.
1347 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1348 {
1349 int paddingBeforeInput = paddings[i];
1350 int paddingAfterInput = paddings[i + 1];
1351
1352 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1353 {
1354 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1355 }
1356
1357 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1358 }
1359
1360 return true;
1361}
1362
/// Shared conversion routine for the 2D pooling operations (max / average / L2 pooling,
/// selected via @p poolType). Supports both NNAPI signatures:
///  - explicit padding (>= 10 inputs): pad l/r/t/b, strides, kernel size, activation,
///    plus an optional data-layout operand at index 10;
///  - implicit padding (< 10 inputs): padding scheme, strides, kernel size, activation,
///    plus an optional data-layout operand at index 7.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    // NHWC is the default; it may be overridden below by the optional data-layout operand.
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // HAL 1.2 operands may carry an extra optional data-layout input.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // Shape indexed as NHWC ([N, H, W, C]), matching the default layout set above.
        // NOTE(review): the optional data-layout operand is only read AFTER these paddings
        // are computed — confirm implicit-padding NCHW models are handled correctly.
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }
    }

    // Check whether the configured backends can run this pooling configuration.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the pooling layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1480
/// Converts an NNAPI ADD operation into an ArmNN Addition layer, handling the optional
/// fused activation (input 2) and implicit broadcasting of the two input tensors.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Check whether the configured backends can run this addition.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsAdditionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo0,
                               inputInfo1,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        // BroadcastTensor connects input0/input1 to startLayer; presumably it inserts a
        // reshape when the shapes require broadcasting — returns false if that fails.
        // TODO(review): confirm against BroadcastTensor's definition in Utils.
        bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
        if (!isReshapeSupported)
        {
            return false;
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
1550
// Converts an Android NN CONCATENATION operation into an ArmNN Concat layer.
//
// Input layout: inputs 0..N-1 are the tensors to concatenate; input N is an
// INT32 scalar giving the concatenation axis (negative values are wrapped,
// TensorFlow-style). Inputs of rank 1 or 2 are first reshaped up to rank 3,
// and a permutation pair may be applied around the concat so that the axis
// lands on a dimension the ArmNN concat implementation supports; both
// transformations are undone on the output before it is tracked.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    // Copied (not referenced): the shape may be rewritten below when inputs are
    // reshaped or permuted.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Tracks whether low-rank inputs were padded up to rank 3, and by how many
    // leading dimensions, so the output can be reshaped back afterwards.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
        if (!operandInputHandle.IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Gather non-owning TensorInfo pointers for the backend support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
1788
// Converts an Android NN CONV_2D operation into an ArmNN Convolution2d layer,
// followed by an optional fused activation layer.
//
// Two NNAPI signatures are handled:
//   - 10 inputs: explicit padding (left/right/top/bottom), strides, activation.
//   -  7 inputs: implicit padding scheme, strides, activation; padding is then
//     derived with CalcPadding from the input and kernel dimensions.
// Weights (input 1) and bias (input 2) must be constant tensors. Dynamic
// output tensors are rejected.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale, as
    // required for quantized convolution.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit-padding form.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit-padding form: compute the padding from the scheme.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel width/height taken from dims 2/1 of the weights, input
        // width/height from dims 2/1 of the NHWC input.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1908
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001909template<typename HalPolicy,
1910 typename HalOperation = typename HalPolicy::Operation,
1911 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +01001912bool ConvertDepthToSpace(const HalOperation& operation, const HalModel& model, ConversionData& data)
1913{
1914 using HalOperand = typename HalPolicy::Operand;
1915 using HalOperandType = typename HalPolicy::OperandType;
1916
1917 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1918 if (!input.IsValid() )
1919 {
1920 return Fail("%s: Operation has invalid inputs", __func__);
1921 }
1922
1923 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1924 unsigned int rank = inputInfo.GetNumDimensions();
1925 if (rank != 4)
1926 {
1927 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1928 }
1929
1930 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1931 if (!output)
1932 {
1933 return Fail("%s: Could not read output 0", __func__);
1934 }
1935
1936 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1937 if (IsDynamicTensor(outputInfo))
1938 {
1939 return Fail("%s: Dynamic output tensors are not supported", __func__);
1940 }
1941
1942 armnn::DepthToSpaceDescriptor descriptor;
1943
1944 GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, descriptor.m_BlockSize, model, data);
1945 if (descriptor.m_BlockSize <= 1)
1946 {
1947 return Fail("%s: Block size must be at least 1 in all dimensions");
1948 }
1949
1950 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
1951 if (Is12Operand(*output))
1952 {
1953 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
1954 }
1955
1956 bool isSupported = false;
1957 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1958 IsDepthToSpaceSupported,
1959 data.m_Backends,
1960 isSupported,
1961 inputInfo,
1962 outputInfo,
1963 descriptor);
1964 if (!isSupported)
1965 {
1966 return false;
1967 }
1968
1969 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
1970 assert(layer != nullptr);
1971 input.Connect(layer->GetInputSlot(0));
1972
1973 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1974}
1975
// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer, followed by an optional fused activation layer.
//
// Two NNAPI signatures are handled:
//   - 11 inputs: explicit padding, strides, depth multiplier (index 9, read
//     implicitly via the weights shape), activation at index 10.
//   -  8 inputs: implicit padding scheme, strides, depth multiplier, activation
//     at index 7; padding is derived with CalcPadding.
// The AndroidNN weights layout [1, H, W, I * M] is reinterpreted as
// [H, W, I, M] and then permuted to ArmNN's expected [M, I, H, W].
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // I comes from the NHWC input channel count; M is recovered as (I*M)/I.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale, as
    // required for quantized depthwise convolution.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit-padding form.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit-padding form: compute the padding from the scheme.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are in MIHW order here, so kernel width/height come from
        // dims 3/2; input width/height from dims 2/1 of the NHWC input.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
2120
Mike Kelly3c673942019-07-25 09:26:06 +01002121template<typename HalPolicy,
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002122 typename HalOperation = typename HalPolicy::Operation,
2123 typename HalModel = typename HalPolicy::Model>
2124bool IsOperandConstant(const HalOperation& operation,
2125 uint32_t inputIndex,
2126 const HalModel& model,
2127 bool& isConstant)
2128{
2129 using HalOperand = typename HalPolicy::Operand;
2130 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
2131
2132 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
2133 if (!operand)
2134 {
2135 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
2136 }
2137
2138 isConstant = operand->lifetime == HalOperandLifeTime::CONSTANT_COPY ||
2139 operand->lifetime == HalOperandLifeTime::CONSTANT_REFERENCE ||
2140 operand->lifetime == HalOperandLifeTime::NO_VALUE;
2141
2142 return true;
2143}
2144
2145template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01002146 typename Operation = typename HalPolicy::Operation,
2147 typename Model = typename HalPolicy::Model>
2148bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
Mike Kelly3c673942019-07-25 09:26:06 +01002149{
Mike Kelly46272802019-08-14 17:00:48 +01002150 using Operand = typename HalPolicy::Operand;
2151
2152 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2153 if (!input.IsValid())
2154 {
2155 return Fail("%s: Operation has invalid input", __func__);
2156 }
2157
2158 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2159 if (!outputOperand)
2160 {
2161 return Fail("%s: Operation has invalid outputs", __func__);
2162 }
2163
Pablo Tellofb45e2f2019-10-18 16:51:57 +01002164 // If the output is going into the FC weights and input is const just return true
2165 const size_t outputIndex = operation.outputs[0];
2166 bool input_is_constant = false;
2167 if (!IsOperandConstant<HalPolicy>(operation,0,model,input_is_constant) && input_is_constant)
2168 {
2169 return Fail("Non const input not supported");
2170 }
2171
2172 // Iterate through the nodes and find the operation feeding from the Dequantize output operand
2173 for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); ++operationIdx)
2174 {
2175 // Search for the FC op which consumes the output of Dequantize with index equal to outputIndex
2176 const auto& operationIt = model.operations[operationIdx];
2177 switch (operationIt.type)
2178 {
2179 case HalPolicy::OperationType::FULLY_CONNECTED:
2180 if (outputIndex == operationIt.inputs[1]) // Weights are bound to slot 1
2181 {
2182 // If the output is going into the FC weights and input is const just return true
2183 return true;
2184 }
2185 break;
2186 case HalPolicy::OperationType::LSTM:
2187 for (size_t k = 0; k < operationIt.inputs.size(); ++k)
2188 {
2189 if (outputIndex == operationIt.inputs[k])
2190 {
2191 // If the output is going into the LSTM weights and input is const just return true
2192 return true;
2193 }
2194 }
2195 break;
2196 default:
2197 break;
2198 }
2199 }
2200
Mike Kelly46272802019-08-14 17:00:48 +01002201 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2202 if (IsDynamicTensor(outputInfo))
2203 {
2204 return Fail("%s: Dynamic output tensors are not supported", __func__);
2205 }
2206
2207 bool isSupported = false;
2208 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2209 IsDequantizeSupported,
2210 data.m_Backends,
2211 isSupported,
2212 input.GetTensorInfo(),
2213 GetTensorInfoForOperand(*outputOperand));
2214 if (!isSupported)
2215 {
2216 return false;
2217 }
2218
2219 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
2220 assert(layer != nullptr);
2221 input.Connect(layer->GetInputSlot(0));
2222
2223 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2224}
2225
2226template<typename HalPolicy,
2227 typename Operation = typename HalPolicy::Operation,
2228 typename Model = typename HalPolicy::Model>
2229bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
2230{
2231 using Operand = typename HalPolicy::Operand;
2232
2233 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2234 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2235
2236 if (!input0.IsValid() || !input1.IsValid())
2237 {
2238 return Fail("%s: Operation has invalid inputs", __func__);
2239 }
2240
2241 // The FuseActivation parameter is always the input index 2
2242 // and it should be optional
2243 ActivationFn activationFunction;
2244 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2245 {
2246 return Fail("%s: Operation has invalid inputs", __func__);
2247 }
2248
2249 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2250 if (!output)
2251 {
2252 return Fail("%s: Could not read output 0", __func__);
2253 }
2254
2255 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2256 if (IsDynamicTensor(outputInfo))
2257 {
2258 return Fail("%s: Dynamic output tensors are not supported", __func__);
2259 }
2260
2261 bool isSupported = false;
2262 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2263 IsDivisionSupported,
2264 data.m_Backends,
2265 isSupported,
2266 input0.GetTensorInfo(),
2267 input1.GetTensorInfo(),
2268 outputInfo);
2269 if (!isSupported)
2270 {
2271 return false;
2272 }
2273
2274 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
2275 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2276
2277 if (endLayer)
2278 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002279 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2280 if (!isReshapeSupported)
2281 {
2282 return false;
2283 }
2284
Mike Kelly46272802019-08-14 17:00:48 +01002285 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2286 }
2287 return Fail("%s: ProcessActivation failed", __func__);
2288}
2289
2290template<typename HalPolicy,
2291 typename Operation = typename HalPolicy::Operation,
2292 typename Model = typename HalPolicy::Model>
2293bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
2294{
2295 using Operand = typename HalPolicy::Operand;
2296
2297 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2298 if (!input.IsValid())
2299 {
2300 return Fail("%s: Operation has invalid inputs", __func__);
2301 }
2302
2303 const Operand* const outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2304 if (!outputOperand)
2305 {
2306 return Fail("%s: Operation has invalid outputs", __func__);
2307 }
2308
2309 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2310 if (IsDynamicTensor(outputInfo))
2311 {
2312 return Fail("%s: Dynamic output tensors are not supported", __func__);
2313 }
2314
2315 bool isSupported = false;
2316 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2317 IsFloorSupported,
2318 data.m_Backends,
2319 isSupported,
2320 input.GetTensorInfo(),
2321 outputInfo);
2322 if (!isSupported)
2323 {
2324 return false;
2325 }
2326
2327 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
2328 assert(layer != nullptr);
2329 input.Connect(layer->GetInputSlot(0));
2330
2331 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2332}
2333
// HAL 1.0 has no TENSOR_QUANT8_SYMM operand type, so for a V1_0 operand this
// overload always reports false.
inline bool IsQSymm8(const V1_0::Operand&)
{
    return false;
}

#ifdef ARMNN_ANDROID_NN_V1_2

// Returns true if the HAL 1.2 operand holds 8-bit symmetric quantized data.
inline bool IsQSymm8(const V1_2::Operand& operand)
{
    return operand.type == V1_2::OperandType::TENSOR_QUANT8_SYMM;
}

#endif
2347
2348template<typename HalPolicy,
2349 typename Operation = typename HalPolicy::Operation,
2350 typename Model = typename HalPolicy::Model>
2351std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo>
2352DequantizeIfRequired(size_t operand_index, const Operation& operation, const Model& model, const ConversionData& data)
2353{
2354 using Operand = typename HalPolicy::Operand;
2355
2356 bool weights_constant = false;
2357 if (!(IsOperandConstant<HalPolicy>(operation, operand_index, model, weights_constant) && !weights_constant))
2358 {
2359 return { nullptr, 0, armnn::TensorInfo() };
2360 }
2361
2362 const size_t weightsInputIndex = operation.inputs[operand_index];
2363
2364 // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
2365 // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
2366 for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); ++operationIdx)
2367 {
2368 const auto& operationIt = model.operations[operationIdx];
2369 size_t outOpIndex = weightsInputIndex + 1;
2370
2371 // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
2372 if (operationIt.type != HalPolicy::OperationType::DEQUANTIZE)
2373 {
2374 continue;
2375 }
2376
2377 for (size_t i = 0; outOpIndex != weightsInputIndex && i < operation.outputs.size(); ++i)
2378 {
2379 outOpIndex = operationIt.outputs[i];
2380 break;
2381 }
2382
2383 if (outOpIndex != weightsInputIndex)
2384 {
2385 break;
2386 }
2387
2388 const Operand* operand = GetInputOperand<HalPolicy>(operationIt, 0, model);
2389 BOOST_ASSERT(operand);
2390
2391 armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(*operand);
2392 if (!IsQSymm8(*operand))
2393 {
2394 // Only supporting dequantize from QSYMM8 to FLOAT
2395 break;
2396 }
2397
2398 // Allocate a new buffer for the dequantized data and manually dequantize
2399 const void* startValue = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
2400 if (!startValue)
2401 {
2402 // Failed to get the operand address
2403 break;
2404 }
2405
2406 const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
2407 size_t dequantizedBufferLength = operand->location.length;
2408 const float quantizationScale = tensorInfo.GetQuantizationScale();
2409 auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
2410 for (size_t i = 0; i < dequantizedBufferLength; ++i)
2411 {
2412 float* dstPtr = dequantizedBuffer.get();
2413 BOOST_ASSERT(dstPtr);
2414 *dstPtr++ = quantizedBuffer[i] * quantizationScale;
2415 }
2416
2417 tensorInfo.SetDataType(armnn::DataType::Float32);
2418 return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float), std::move(tensorInfo) };
2419 }
2420
2421 return { nullptr, 0, armnn::TensorInfo() };
2422}
2423
2424template<typename HalPolicy,
2425 typename Operation = typename HalPolicy::Operation,
2426 typename Model = typename HalPolicy::Model>
2427ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation& operation,
2428 const Model& model,
2429 const ConversionData& data,
2430 size_t operandIndex,
2431 bool optional = false)
2432{
2433 auto dequantized = DequantizeIfRequired<HalPolicy, Operation, Model>(operandIndex,operation, model, data);
2434 if (std::get<1>(dequantized) == 0 && optional)
2435 {
2436 // Optional tensor with no values is not really an error. Return it as invalid, but marked as optional
2437 return ConstTensorPin(true);
2438 }
2439
2440 return std::get<1>(dequantized) ?
2441 ConstTensorPin(std::get<2>(dequantized), std::get<0>(dequantized).get(),
2442 std::get<1>(dequantized), g_DontPermute):
2443 ConvertOperationInputToConstTensorPin<HalPolicy>(operation, operandIndex, model, data);
2444}
2445
2446
// Converts an NNAPI FULLY_CONNECTED operation into an ArmNN FullyConnected
// layer. Inputs: 0 = data tensor, 1 = weights (possibly fed by a DEQUANTIZE
// op, hence DequantizeAndMakeConstTensorPin), 2 = 1D bias,
// 3 = fused activation function. Returns false on any validation failure.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Weights may come from a constant operand or from a preceding DEQUANTIZE op.
    ConstTensorPin weightsPin = DequantizeAndMakeConstTensorPin<HalPolicy, Operation, Model>(operation, model, data, 1);

    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid bias", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Flatten the input shape so it is compatible with the 2D weights; the
    // helper throws on incompatible shapes, which we convert into a Fail().
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    // Input 3 is the mandatory fused activation function.
    ActivationFn activationFunction;
    if (!GetInputActivationFunction<HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Query backend support using the flattened (2D) input shape.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    // ProcessActivation may append an activation layer; endLayer is the last
    // layer in the (possibly fused) chain, or nullptr on failure.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Input has rank > 2: insert a Reshape layer to flatten it to the
            // 2D shape computed above before feeding the FullyConnected layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            // Already 2D (or less) — connect the input directly.
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
2557
2558template<typename HalPolicy,
2559 typename Operation = typename HalPolicy::Operation,
2560 typename Model = typename HalPolicy::Model>
2561bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2562{
Mike Kelly999e2092019-08-15 10:46:46 +01002563 if (operation.inputs.size() != 1)
2564 {
2565 return Fail("%s: Optional inputs are not supported", __func__);
2566 }
2567
Mike Kelly46272802019-08-14 17:00:48 +01002568 using Operand = typename HalPolicy::Operand;
2569
2570 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2571 if (!input.IsValid())
2572 {
2573 return Fail("%s: Operation has invalid inputs", __func__);
2574 }
2575
2576 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2577 if (!output)
2578 {
2579 return Fail("%s: Could not read output 0", __func__);
2580 }
2581
2582 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2583 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2584
2585 if (IsDynamicTensor(outputInfo))
2586 {
2587 return Fail("%s: Dynamic output tensors are not supported", __func__);
2588 }
2589 if (outputInfo.GetNumDimensions() != 4u)
2590 {
2591 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2592 }
2593
2594 armnn::L2NormalizationDescriptor desc;
2595 desc.m_DataLayout = armnn::DataLayout::NHWC;
2596
2597 bool isSupported = false;
2598 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2599 IsL2NormalizationSupported,
2600 data.m_Backends,
2601 isSupported,
2602 inputInfo,
2603 outputInfo,
2604 desc);
2605 if (!isSupported)
2606 {
2607 return false;
2608 }
2609
2610 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
2611 assert(layer != nullptr);
2612 input.Connect(layer->GetInputSlot(0));
2613
2614 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2615}
2616
// Converts an NNAPI LOCAL_RESPONSE_NORMALIZATION operation into an ArmNN
// Normalization layer (across-channel, LocalBrightness method).
// Inputs: 0 = tensor, 1 = radius (INT32), 2 = bias k, 3 = alpha, 4 = beta.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertLocalResponseNormalization(const Operation& operation,
                                       const Model& model,
                                       ConversionData& data)
{
    // Exactly the 5 mandatory inputs are handled; optional inputs are rejected.
    if (operation.inputs.size() != 5)
    {
        return Fail("%s: Optional inputs are not supported", __func__);
    }

    using Operand = typename HalPolicy::Operand;
    using OperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }
    if (outputInfo.GetNumDimensions() != 4u)
    {
        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    // Read radius (into m_NormSize, adjusted below) and the k/alpha/beta scalars.
    if (!input.IsValid() ||
        !GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }


    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2694
2695template<typename HalPolicy,
2696 typename Operation = typename HalPolicy::Operation,
2697 typename Model = typename HalPolicy::Model>
2698bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2699{
2700 using Operand = typename HalPolicy::Operand;
2701
2702 armnn::ActivationDescriptor desc;
2703 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2704
2705 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
2706}
2707
// Converts an NNAPI MEAN operation into an ArmNN Mean layer.
// Inputs: 0 = tensor, 1 = axis tensor (INT32), 2 = keep_dims flag (INT32).
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // NOTE(review): (i + rank) % rank wraps negative axes, but only handles
    // values in [-rank, rank) — confirm callers never pass values outside it.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2784
2785template<typename HalPolicy,
2786 typename Operation = typename HalPolicy::Operation,
2787 typename Model = typename HalPolicy::Model>
2788bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
2789{
2790 using Operand = typename HalPolicy::Operand;
2791
2792 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2793 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
2794
2795 if (!input0.IsValid() || !input1.IsValid())
2796 {
2797 return Fail("%s: Operation has invalid inputs", __func__);
2798 }
2799
2800 // The FuseActivation parameter is always the input index 2
2801 // and it should be optional
2802 ActivationFn activationFunction;
2803 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
2804 {
2805 return Fail("%s: Operation has invalid inputs", __func__);
2806 }
2807
2808 const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
2809
2810 if (outputOperand == nullptr)
2811 {
2812 return false;
2813 }
2814
2815 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
2816 if (IsDynamicTensor(outputInfo))
2817 {
2818 return Fail("%s: Dynamic output tensors are not supported", __func__);
2819 }
2820
2821 bool isSupported = false;
2822 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2823 IsMultiplicationSupported,
2824 data.m_Backends,
2825 isSupported,
2826 input0.GetTensorInfo(),
2827 input1.GetTensorInfo(),
2828 outputInfo);
2829 if (!isSupported)
2830 {
2831 return false;
2832 }
2833
2834 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
2835 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2836
2837 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2838 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2839
2840 if (endLayer != nullptr)
2841 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01002842 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
2843 if (!isReshapeSupported)
2844 {
2845 return false;
2846 }
2847
Mike Kelly46272802019-08-14 17:00:48 +01002848 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2849 }
2850 else
2851 {
2852 return Fail("%s: ProcessActivation failed", __func__);
2853 }
2854}
2855
// Converts an NNAPI PAD operation into an ArmNN Pad layer.
// NOTE(review): takes `Operation&` non-const, unlike the sibling converters
// which take `const Operation&` — presumably historical; confirm before
// normalising, as callers may rely on the current signature.
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertPad(Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    // Read the per-dimension (before, after) padding amounts from input 1.
    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
    // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
    // (QuantizationOffset - QuantizationOffset) * scale = 0.
    if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
    {
        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2918
// Converts an NNAPI RESHAPE operation into an ArmNN Reshape layer.
// Inputs: 0 = tensor, 1 = 1-D INT32 tensor holding the requested shape
// (which may contain -1 wildcards, resolved by reshapePrepare()).
template<typename HalPolicy,
         typename Operation = typename HalPolicy::Operation,
         typename Model = typename HalPolicy::Model>
bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
    using Operand = typename HalPolicy::Operand;

    const Operand* inputOperand = GetInputOperand<HalPolicy>(operation, 0, model);
    const Operand* requestedShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    const Operand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    // The model's declared output shape must agree with the resolved shape.
    const Shape outputOperandShape = GetOperandShape(*outputOperand);
    if (!SameShape(requestedShape, outputOperandShape))
    {
        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}
2993
2994template<typename HalPolicy,
2995 typename Operation = typename HalPolicy::Operation,
Mike Kelly0a879362019-07-29 16:56:31 +01002996 typename Model = typename HalPolicy::Model>
2997bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
2998{
Mike Kelly46272802019-08-14 17:00:48 +01002999 using Operand = typename HalPolicy::Operand;
3000
Mike Kelly0a879362019-07-29 16:56:31 +01003001 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3002 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
3003
3004 if (!input0.IsValid() || !input1.IsValid())
3005 {
3006 return Fail("%s: Operation has invalid inputs", __func__);
3007 }
3008
3009 // The FuseActivation parameter is always the input index 2
3010 // and it should be optional
3011 ActivationFn activationFunction;
3012 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
3013 {
3014 return Fail("%s: Operation has invalid inputs", __func__);
3015 }
3016
3017 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3018 if (!output)
3019 {
3020 return Fail("%s: Could not read output 0", __func__);
3021 }
3022
3023 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3024 if (IsDynamicTensor(outputInfo))
3025 {
3026 return Fail("%s: Dynamic output tensors are not supported", __func__);
3027 }
3028
3029 bool isSupported = false;
3030 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3031 IsSubtractionSupported,
3032 data.m_Backends,
3033 isSupported,
3034 input0.GetTensorInfo(),
3035 input1.GetTensorInfo(),
3036 outputInfo);
3037 if (!isSupported)
3038 {
3039 return false;
3040 }
3041
3042 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
3043 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
3044
3045 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
3046 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
3047
3048 if (endLayer)
3049 {
Sadik Armagan64b19b52019-08-19 09:49:58 +01003050 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3051 if (!isReshapeSupported)
3052 {
3053 return false;
3054 }
Mike Kelly0a879362019-07-29 16:56:31 +01003055 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
3056 }
3057
3058 return Fail("%s: ProcessActivation failed", __func__);
3059}
3060
Finn Williams23b87b32019-07-30 11:44:05 +01003061template<typename HalPolicy,
Mike Kelly46272802019-08-14 17:00:48 +01003062 typename Operation = typename HalPolicy::Operation,
3063 typename Model = typename HalPolicy::Model>
3064bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
3065{
3066 using Operand = typename HalPolicy::Operand;
3067
3068 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3069 if (!input.IsValid())
3070 {
3071 return Fail("%s: Operation has invalid inputs", __func__);
3072 }
3073
3074 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3075 unsigned int rank = inputInfo.GetNumDimensions();
3076 if (rank > 4)
3077 {
3078 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3079 }
3080
3081 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3082 if (!output)
3083 {
3084 return Fail("%s: Could not read output 0", __func__);
3085 }
3086
3087 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
3088 {
3089 return Fail("%s: Dynamic output tensors are not supported", __func__);
3090 }
3091
3092 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
3093 // if the operand index is out of bounds.
3094 const Operand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
3095
3096 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
3097
3098 std::vector<int32_t> axis;
3099 if (!axisOperand)
3100 {
3101 axis.assign(dimensionSequence,
3102 dimensionSequence + rank);
3103 }
3104 else
3105 {
3106 GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data);
3107 }
3108
3109 std::vector<uint32_t> outputDims;
3110 for (unsigned int i = 0; i < rank; i++)
3111 {
3112 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
3113 auto currentDimension = inputInfo.GetShape()[i];
3114 if (skipSqueeze || currentDimension != 1)
3115 {
3116 outputDims.push_back(currentDimension);
3117 }
3118 }
3119
3120 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
3121
3122 armnn::TensorInfo outputInfo = inputInfo;
3123 outputInfo.SetShape(outShape);
3124
3125 armnn::ReshapeDescriptor reshapeDesc;
3126 reshapeDesc.m_TargetShape = outputInfo.GetShape();
3127
3128 bool isSupported = false;
3129 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3130 IsReshapeSupported,
3131 data.m_Backends,
3132 isSupported,
3133 inputInfo,
3134 reshapeDesc);
3135 if (!isSupported)
3136 {
3137 return false;
3138 }
3139
3140 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
3141 assert(layer != nullptr);
3142 input.Connect(layer->GetInputSlot(0));
3143
3144 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3145}
3146
3147template<typename HalPolicy,
3148 typename Operation = typename HalPolicy::Operation,
3149 typename Model = typename HalPolicy::Model>
3150bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
3151{
3152 using Operand = typename HalPolicy::Operand;
3153
3154 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3155 if (!input.IsValid())
3156 {
3157 return Fail("%s: Operation has invalid inputs", __func__);
3158 }
3159
3160 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3161 unsigned int rank = inputInfo.GetNumDimensions();
3162 if (rank > 4)
3163 {
3164 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3165 }
3166
3167 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3168 if (!output)
3169 {
3170 return Fail("%s: Could not read output 0", __func__);
3171 }
3172
3173 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3174 if (IsDynamicTensor(outputInfo))
3175 {
3176 return Fail("%s: Dynamic output tensors are not supported", __func__);
3177 }
3178
3179 const Operand* beginOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3180 const Operand* endOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3181 const Operand* stridesOperand = GetInputOperand<HalPolicy>(operation, 3, model);
3182
3183 std::vector<int32_t> beginValues;
3184 std::vector<int32_t> endValues;
3185 std::vector<int32_t> stridesValues;
3186
3187 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
3188 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
3189 {
3190 if (!GetTensorInt32Values<HalPolicy>(operand, operandValues, model, data))
3191 {
3192 return false;
3193 }
3194
3195 if (operandValues.size() != rank)
3196 {
3197 return false;
3198 }
3199
3200 return true;
3201 };
3202
3203 if (!ValidateInputOperands(*beginOperand, beginValues)
3204 || !ValidateInputOperands(*endOperand, endValues)
3205 || !ValidateInputOperands(*stridesOperand, stridesValues))
3206 {
3207 return Fail("%s: Operation has invalid input operand", __func__);
3208 }
3209
3210 // Stride cannot have value '0'
3211 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
3212 {
3213 return Fail("%s: Stride must be non-zero value.", __func__);
3214 }
3215
3216 armnn::StridedSliceDescriptor descriptor;
3217 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
3218 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
3219 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
3220 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3221
3222 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
3223 if (!GetInputInt32<HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
3224 !GetInputInt32<HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
3225 !GetInputInt32<HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
3226 {
3227 return Fail("%s: Operation has invalid inputs", __func__);
3228 }
3229
3230 bool isSupported = false;
3231 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3232 IsStridedSliceSupported,
3233 data.m_Backends,
3234 isSupported,
3235 inputInfo,
3236 outputInfo,
3237 descriptor);
3238 if (!isSupported)
3239 {
3240 return false;
3241 }
3242
3243 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
3244 assert(layer != nullptr);
3245 input.Connect(layer->GetInputSlot(0));
3246
3247 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3248}
3249
3250template<typename HalPolicy,
3251 typename Operation = typename HalPolicy::Operation,
3252 typename Model = typename HalPolicy::Model>
3253bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
3254{
3255 using Operand = typename HalPolicy::Operand;
3256
3257 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3258 if (!input.IsValid())
3259 {
3260 return Fail("%s: Operation has invalid inputs", __func__);
3261 }
3262
3263 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3264 unsigned int rank = inputInfo.GetNumDimensions();
3265 if (rank > 4)
3266 {
3267 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
3268 }
3269
3270 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
3271 // if the operand index is out of bounds.
3272 const Operand* permOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
3273
3274 std::vector<int32_t> perm(rank);
3275 if (!permOperand)
3276 {
3277 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
3278 for (unsigned int i = rank; i > 0; i--)
3279 {
3280 perm[rank - i] = boost::numeric_cast<int> (i - 1);
3281 }
3282 }
3283 else
3284 {
3285 GetTensorInt32Values<HalPolicy>(*permOperand, perm, model, data);
3286 }
3287
3288 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
3289
3290 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
3291 if (!permutationVector.IsEqual(NHWCToArmNN)
3292 && !permutationVector.IsEqual(ArmNNToNHWC)
3293 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
3294 {
3295 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
3296 }
3297
3298 armnn::PermuteDescriptor permuteDesc;
3299 permuteDesc.m_DimMappings = permutationVector;
3300
3301 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3302 if (!output)
3303 {
3304 return Fail("%s: Could not read output 0", __func__);
3305 }
3306
3307 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3308
3309 bool isSupported = false;
3310 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3311 IsPermuteSupported,
3312 data.m_Backends,
3313 isSupported,
3314 inputInfo,
3315 outputInfo,
3316 permuteDesc);
3317 if (!isSupported)
3318 {
3319 return false;
3320 }
3321
3322 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
3323 assert(layer != nullptr);
3324 input.Connect(layer->GetInputSlot(0));
3325
3326 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3327}
3328
3329template<typename HalPolicy,
Finn Williams23b87b32019-07-30 11:44:05 +01003330 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01003331 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01003332 typename HalModel = typename HalPolicy::Model>
3333bool ConvertBatchToSpaceNd(const HalOperation& operation,
3334 const HalModel& model,
3335 ConversionData& data)
3336{
Finn Williams23b87b32019-07-30 11:44:05 +01003337
3338 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3339 if (!input.IsValid())
3340 {
3341 return Fail("%s: Operation has invalid inputs", __func__);
3342 }
3343
3344 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3345 if (!output)
3346 {
3347 return Fail("%s: Could not read output 0", __func__);
3348 }
3349
3350 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3351 if (IsDynamicTensor(outputInfo))
3352 {
3353 return Fail("%s: Dynamic output tensors are not supported", __func__);
3354 }
3355
3356 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3357 if (!blockOperand)
3358 {
3359 return Fail("%s: Could not read input 1", __func__);
3360 }
3361
3362 // Convert the block operand to int32
3363 std::vector<int32_t> block;
3364 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
3365 {
3366 return Fail("%s: Input 1 has invalid values", __func__);
3367 }
3368
3369 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3370
3371 unsigned int rank = inputInfo.GetNumDimensions();
3372 if (rank != 4)
3373 {
3374 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
3375 }
3376
3377 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
3378 {
3379 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
3380 " greater than or equal to 1", __func__);
3381 }
3382
3383 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
3384 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
3385 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
3386
3387 if (Is12Operand(*output))
3388 {
Finn Williams0e4e4392019-07-31 10:56:27 +01003389 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01003390 }
3391 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
3392 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
3393
3394 bool isSupported = false;
3395 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3396 IsBatchToSpaceNdSupported,
3397 data.m_Backends,
3398 isSupported,
3399 inputInfo,
3400 outputInfo,
3401 batchToSpaceNdDesc);
3402 if (!isSupported)
3403 {
3404 return false;
3405 }
3406
3407 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
3408 assert(layer != nullptr);
3409 input.Connect(layer->GetInputSlot(0));
3410
3411 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3412}
Mike Kelly0a879362019-07-29 16:56:31 +01003413
Finn Williamsd74c5052019-07-30 17:06:00 +01003414template<typename HalPolicy,
3415 typename HalOperation = typename HalPolicy::Operation,
3416 typename HalOperand = typename HalPolicy::Operand,
3417 typename HalModel = typename HalPolicy::Model>
3418bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
3419{
3420 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3421 if (!input.IsValid())
3422 {
3423 return Fail("%s: Operation has invalid inputs", __func__);
3424 }
3425
3426 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3427 unsigned int rank = inputInfo.GetNumDimensions();
3428 unsigned int spatialDim = rank - 2;
3429
3430 if (rank != 4)
3431 {
3432 Fail("%s: Only inputs with rank 4 are supported", __func__);
3433 }
3434
3435 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3436 if (!output)
3437 {
3438 return Fail("%s: Could not read output 0", __func__);
3439 }
3440
3441 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3442 if (IsDynamicTensor(outputInfo))
3443 {
3444 return Fail("%s: Dynamic output tensors are not supported", __func__);
3445 }
3446
3447 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
3448 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
3449
3450 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
3451 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
3452 {
3453 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
3454 }
3455
3456 std::vector<int32_t> blockShape;
3457 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
3458 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
3459 {
3460 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
3461 }
3462
3463 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
3464 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
3465 {
3466 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
3467 }
3468
3469 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
3470 std::vector<int32_t> paddings;
3471 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
3472 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
3473 {
3474 int paddingBeforeInput = paddings[i];
3475 int paddingAfterInput = paddings[i + 1];
3476 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
3477 {
3478 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
3479 }
3480
3481 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
3482 }
3483
3484 armnn::SpaceToBatchNdDescriptor descriptor;
3485 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
3486 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
3487 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
3488
3489 if (Is12Operand(*output))
3490 {
3491 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
3492 }
3493
3494 bool isSupported = false;
3495 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3496 IsSpaceToBatchNdSupported,
3497 data.m_Backends,
3498 isSupported,
3499 inputInfo,
3500 outputInfo,
3501 descriptor);
3502 if (!isSupported)
3503 {
3504 return false;
3505 }
3506
3507 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
3508 assert(layer != nullptr);
3509 input.Connect(layer->GetInputSlot(0));
3510
3511 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3512}
3513
Kevin May407718f2019-09-09 14:46:41 +01003514template<typename HalPolicy,
3515 typename HalOperation = typename HalPolicy::Operation,
3516 typename HalModel = typename HalPolicy::Model>
3517bool ConvertAbs(const HalOperation& operation, const HalModel& model, ConversionData& data)
3518{
3519 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
3520
3521 if (!input.IsValid())
3522 {
3523 return Fail("%s: Operation has invalid input", __func__);
3524 }
3525
3526 using HalOperand = typename HalPolicy::Operand;
3527 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
3528 if (!output)
3529 {
3530 return Fail("%s: Could not read output 0", __func__);
3531 }
3532
3533 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3534 if (IsDynamicTensor(outputInfo))
3535 {
3536 return Fail("%s: Dynamic output tensors are not supported", __func__);
3537 }
3538
3539 bool isSupported = false;
3540 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3541 IsAbsSupported,
3542 data.m_Backends,
3543 isSupported,
3544 input.GetTensorInfo(),
3545 outputInfo);
3546
3547 if (!isSupported)
3548 {
3549 return false;
3550 }
3551
3552 armnn::IConnectableLayer* const layer = data.m_Network->AddAbsLayer();
3553 assert(layer != nullptr);
3554 input.Connect(layer->GetInputSlot(0));
3555
3556 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
3557}
3558
3559
saoste01b8471482018-10-10 09:44:51 +01003560} // namespace armnn_driver