blob: 2fa8a072594777ccaeb168ff1fe886df078d9c5d [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
Ferran Balaguerd30093c2019-07-09 17:04:47 +010011#include <armnn/ILayerSupport.hpp>
12#include <armnn/BackendHelper.hpp>
arovir01b0717b52018-09-05 17:03:25 +010013
Mike Kellyb5fdf382019-06-11 16:35:25 +010014#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010015#include "armnn/src/armnnUtils/Permute.hpp"
arovir01b0717b52018-09-05 17:03:25 +010016
17#include <ActivationFunctor.h>
18#include <CpuExecutor.h>
19#include <OperationsUtils.h>
20
21#include <boost/assert.hpp>
22#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010023#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010024#include <boost/test/tools/floating_point_comparison.hpp>
25
26#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010027#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010028
29namespace armnn_driver
30{
31
32///
33/// Helper classes
34///
35
/// Aggregates the mutable state threaded through the model-to-network conversion:
/// the candidate backends, the ArmNN network being built, the output slot created
/// for each operand, and the memory pools that back constant operand data.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)  // INetworkPtr is a unique_ptr with a custom deleter; start empty
    {}

    const std::vector<armnn::BackendId> m_Backends;       // backends to query for layer support, in preference order
    armnn::INetworkPtr m_Network;                         // the network under construction
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand; // indexed by operand index; nullptr until the operand is converted
    std::vector<android::nn::RunTimePoolInfo> m_MemPools; // pools referenced by CONSTANT_REFERENCE operands
};
48
/// A (possibly invalid) reference to the output slot that produces a layer input,
/// together with the tensor info describing the data flowing through it.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    /// True when this handle refers to a usable output slot.
    bool IsValid() const;

    /// Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    /// Tensor info captured at construction time.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot; // not owned; may be nullptr when invalid
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
66
/// Holds an armnn::ConstTensor created from operand data, owning a swizzled copy of the
/// data when a permutation was required (otherwise the tensor references pool memory).
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation to apply to the data; NOTE(review): presumably an identity mapping
    //                   leaves the data referenced in place — confirm against the implementation.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Non-copyable (a copy would alias/duplicate the owned swizzle buffer), but movable.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
101
102} // namespace armnn_driver
103
104///
105/// Utility functions
106///
107
108namespace
109{
110
111using namespace armnn_driver;
112using namespace android::nn;
113
// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
// Note: formatStr and args are forwarded verbatim to ALOGD, so formatStr must be a printf-style
// format string whose conversions match the argument pack exactly.
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
122
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100123// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
124// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
125#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
126 std::string reasonIfUnsupported; \
127 try { \
128 for (auto&& backendId : backends) \
129 { \
130 auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
131 if (layerSupportObject) \
132 { \
133 supported = \
134 layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
135 if (supported) \
136 { \
137 break; \
138 } \
139 else \
140 { \
141 if (reasonIfUnsupported.size() > 0) \
142 { \
143 ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
144 } \
145 else \
146 { \
147 ALOGD("%s: not supported by armnn", funcName); \
148 } \
149 } \
150 } \
151 else \
152 { \
153 ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
154 } \
155 } \
156 if (!supported) \
157 { \
158 ALOGD("%s: not supported by any specified backend", funcName); \
159 } \
160 } catch (const armnn::InvalidArgumentException &e) { \
161 throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
arovir01b0717b52018-09-05 17:03:25 +0100162 }
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100163
Mike Kellyb5fdf382019-06-11 16:35:25 +0100164template<typename Operand>
165armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100166{
167 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
168}
169
Matthew Bentham912b3622019-05-03 15:49:14 +0100170inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100171{
Matthew Bentham912b3622019-05-03 15:49:14 +0100172 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
173 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
174 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100175}
176
Mike Kellyb5fdf382019-06-11 16:35:25 +0100177#ifdef ARMNN_ANDROID_NN_V1_2
178
179inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
180{
181 return type == V1_2::OperandType::BOOL ||
182 type == V1_2::OperandType::TENSOR_FLOAT16 ||
183 type == V1_2::OperandType::TENSOR_FLOAT32 ||
184 type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
185 type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
186 type == V1_2::OperandType::TENSOR_INT32;
187}
188
189#endif
190
/// V1_0 has no BOOL operand type, so a V1_0 operand is never a bool.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
195
Sadik Armagan61113162019-07-25 09:09:40 +0100196inline bool Is12Operand(V1_0::Operand)
197{
198 return false;
199}
200
Mike Kellyb5fdf382019-06-11 16:35:25 +0100201#ifdef ARMNN_ANDROID_NN_V1_2
202
203inline bool IsBool(V1_2::Operand operand)
204{
205 return operand.type == V1_2::OperandType::BOOL;
206}
207
Sadik Armagan61113162019-07-25 09:09:40 +0100208/// Checks if a operand is 1_2 Operand
209inline bool Is12Operand(V1_2::Operand)
210{
211 return true;
212}
213
Mike Kellyb5fdf382019-06-11 16:35:25 +0100214#endif
215
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100216template<typename LayerHandleType>
217armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
218 armnn::TensorInfo reshapeInfo)
219{
220 armnn::ReshapeDescriptor reshapeDescriptor;
221 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
222
223 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
224 BOOST_ASSERT(reshapeLayer != nullptr);
225
226 // Attach the input layer to the reshape layer
227 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
228 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
229
230 return *reshapeLayer;
231}
232
233void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
234 armnn::IConnectableLayer* startLayer, armnn::INetwork& network)
arovir01b0717b52018-09-05 17:03:25 +0100235{
236 BOOST_ASSERT(startLayer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100237
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100238 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
239 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
240
241 unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
242 unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
243
244 if (inputDimensions0 == inputDimensions1)
arovir01b0717b52018-09-05 17:03:25 +0100245 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100246 // The inputs have the same number of dimensions, simply connect them to the given layer as they are
247 input0.Connect(startLayer->GetInputSlot(0));
248 input1.Connect(startLayer->GetInputSlot(1));
249
250 return;
251 }
252
253 // Since the number of dimensions do not match then we need to add degenerate dimensions
254 // to the "smaller" tensor using a reshape, while keeping the order of the inputs.
255
256 unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
257 unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
258 boost::numeric_cast<int>(inputDimensions1));
259
260 bool input0IsSmaller = inputDimensions0 < inputDimensions1;
261 LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
262 const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();
263
264 const armnn::TensorShape& smallShape = smallInfo.GetShape();
265 std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
266 for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
267 {
268 reshapedDimensions[i] = smallShape[i - sizeDifference];
269 }
270
271 armnn::TensorInfo reshapedInfo = smallInfo;
272 reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
273 reshapedDimensions.data() });
274 armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(network, smallInputHandle, reshapedInfo);
275
276 if (input0IsSmaller)
277 {
278 // Input0 is the "smaller" tensor, connect the reshape layer as follows:
279 //
280 // Input0 Input1
arovir01b0717b52018-09-05 17:03:25 +0100281 // | |
282 // Reshape |
283 // \ /
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100284 // StartLayer
arovir01b0717b52018-09-05 17:03:25 +0100285
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100286 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
287 input1.Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100288 }
289 else
290 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100291 // Input1 is the "smaller" tensor, connect the reshape layer as follows:
292 //
293 // Input0 Input1
294 // | |
295 // | Reshape
296 // \ /
297 // StartLayer
298
arovir01b0717b52018-09-05 17:03:25 +0100299 input0.Connect(startLayer->GetInputSlot(0));
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100300 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100301 }
302}
303
304void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
305 android::nn::PaddingScheme scheme)
306{
307 int32_t padHead;
308 int32_t padTail;
309 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
310 outPadHead = boost::numeric_cast<uint32_t>(padHead);
311 outPadTail = boost::numeric_cast<uint32_t>(padTail);
312}
313
Mike Kelly86b36d42019-07-12 16:39:33 +0100314#ifdef ARMNN_ANDROID_NN_V1_2
315
316void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
317 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
318{
319 int32_t padHead;
320 int32_t padTail;
321 calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
322 outPadHead = boost::numeric_cast<uint32_t>(padHead);
323 outPadTail = boost::numeric_cast<uint32_t>(padTail);
324}
325
326#endif
327
Matthew Bentham912b3622019-05-03 15:49:14 +0100328Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100329{
330 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100331 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100332 shape.dimensions = operand.dimensions;
333 shape.scale = operand.scale;
334 shape.offset = operand.zeroPoint;
335 return shape;
336}
337
338// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
339// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
340// we accept some tolerance. We don't want to ArmNN itself to accept these inconsistencies as it is up to the user
341// (us, in this case) to ensure they match.
342void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
343 const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
344{
345 const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
346 if (biasInfo.GetQuantizationScale() != expectedBiasScale)
347 {
348 boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
349 if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
350 {
351 ALOGW("Bias quantization scale has been modified to match input*weights");
352 biasInfo.SetQuantizationScale(expectedBiasScale);
353 }
354 }
355}
356
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });  // NHWC <-> NCHW swizzle (see AddPermuteLayer users)
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });  // inverse of NHWCToArmNN
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U }); // used to dodge the concat-on-dim-2 restriction

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });  // paired with RotateTensorRight for 3D concat
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
367
368template<typename OSlot>
369armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
370 const armnn::PermutationVector& mappings)
371{
372 // Add swizzle layer
373 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
374
375 BOOST_ASSERT(layer != nullptr);
376
377 // Connect input to swizzle layer
378 input.Connect(layer->GetInputSlot(0));
379
380 // Setup swizzled output
381 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
382 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
383
384 return *layer;
385}
386
387void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
388{
389 // Add swizzle layer
390 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
391 // Connect swizzled input to layer
392 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
393}
394
395armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
396{
397 // Add deswizzle layer
398 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
399 return deswizzleLayer;
400}
401
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
// Swizzles `input` into firstLayer's slot 0 and returns the deswizzle layer placed after
// lastLayer's slot 0 (firstLayer and lastLayer may differ when wrapping a layer chain).
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
411
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
// Single-layer convenience overload: the layer acts as both first and last of the chain.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
418
419bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
420 const armnn::TensorShape & outputShape,
421 uint32_t concatDim)
422{
423 // Validate the output shape is correct given the input shapes (which have just been validated)
424 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
425 if (outputShape.GetNumDimensions() != numDimensions)
426 {
427 return Fail("%s: Output shape has wrong number of dimensions", __func__);
428 }
429
430 unsigned int outputSizeAlongConcatenatedDimension = 0;
431 for (unsigned int i = 0; i < inputShapes.size(); i++)
432 {
433 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
434 }
435
436 for (unsigned int i = 0; i < numDimensions; ++i)
437 {
438 if (i == concatDim)
439 {
440 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
441 {
442 return Fail(
443 "%s: Invalid output shape for dimension %d (%d != %d)",
444 __func__,
445 i,
446 outputShape[i],
447 outputSizeAlongConcatenatedDimension);
448 }
449 }
450 else
451 {
452 if (outputShape[i] != inputShapes[0][i])
453 {
454 return Fail("%s: Invalid output shape", __func__);
455 }
456 }
457 }
458
459 return true;
460}
461
// Returns true for tensors of rank < 3; callers presumably reshape (rank-expand) such
// tensors before concatenation — confirm at call sites.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
466
arovir01b0717b52018-09-05 17:03:25 +0100467void SwizzleInputs(armnn::INetwork& network,
468 std::vector<LayerInputHandle>& inputs,
469 std::vector<armnn::TensorShape>& inputShapes,
470 const armnn::PermutationVector& mapping)
471{
472 if (!mapping.IsEqual(IdentityPermutation4D))
473 {
474 size_t nInputs = inputs.size();
475 for (size_t i=0; i<nInputs; ++i)
476 {
477 // add swizzle layer
478 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
479 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
480 auto& outputInfo = outputSlot.GetTensorInfo();
481 // replace inputs with the swizzled ones
482 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
483 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
484 }
485 }
486}
487
narpra01f176d5a2018-11-18 20:17:48 +0000488bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
489 int32_t & concatDimension,
490 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100491{
narpra01f176d5a2018-11-18 20:17:48 +0000492 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100493 BOOST_ASSERT(numberOfDimensions >= 3);
494
495 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000496 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
497 // or along dimension 0 or 2 for a 3-D tensor.
498 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100499 {
narpra01f176d5a2018-11-18 20:17:48 +0000500 concatDimension = 1;
501 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
502 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100503 }
narpra01f176d5a2018-11-18 20:17:48 +0000504 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100505 {
narpra01f176d5a2018-11-18 20:17:48 +0000506 concatDimension = 0;
507 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
508 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100509 }
narpra01f176d5a2018-11-18 20:17:48 +0000510 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100511}
512
513} // anonymous namespace
514
515namespace armnn_driver
516{
517
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
528
529} // namespace armnn_driver
530
531///
532/// Utility templates
533///
534
535namespace armnn_driver
536{
537
538using namespace android::nn;
539
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100540template<typename HalPolicy,
541 typename HalOperand = typename HalPolicy::Operand,
542 typename HalOperation = typename HalPolicy::Operation,
543 typename HalModel = typename HalPolicy::Model>
544const HalOperand* GetInputOperand(const HalOperation& operation,
545 uint32_t inputIndex,
546 const HalModel& model,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100547 bool failOnIndexOutOfBounds = true)
arovir01b0717b52018-09-05 17:03:25 +0100548{
549 if (inputIndex >= operation.inputs.size())
550 {
saoste01b8471482018-10-10 09:44:51 +0100551 if (failOnIndexOutOfBounds)
552 {
553 Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
554 }
arovir01b0717b52018-09-05 17:03:25 +0100555 return nullptr;
556 }
557
558 BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
559 return &model.operands[operation.inputs[inputIndex]];
560}
561
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100562template<typename HalPolicy,
563 typename HalOperand = typename HalPolicy::Operand,
564 typename HalOperation = typename HalPolicy::Operation,
565 typename HalModel = typename HalPolicy::Model>
566const HalOperand* GetOutputOperand(const HalOperation& operation,
567 uint32_t outputIndex,
568 const HalModel& model)
arovir01b0717b52018-09-05 17:03:25 +0100569{
570 if (outputIndex >= operation.outputs.size())
571 {
572 Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
573 return nullptr;
574 }
575
576 // Model should have been validated beforehand
577 BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());
578
579 return &model.operands[operation.outputs[outputIndex]];
580}
581
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100582template<typename HalPolicy,
583 typename HalOperand = typename HalPolicy::Operand,
584 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100585const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
Matthew Bentham912b3622019-05-03 15:49:14 +0100586 const HalModel& model,
587 const ConversionData& data,
Kevin Mayf29a2c52019-03-14 11:56:32 +0000588 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100589{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100590 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
arovir01b0717b52018-09-05 17:03:25 +0100591
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100592 const void* valueStart = nullptr;
arovir01b0717b52018-09-05 17:03:25 +0100593 switch (operand.lifetime)
594 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100595 case HalOperandLifeTime::CONSTANT_COPY:
arovir01b0717b52018-09-05 17:03:25 +0100596 {
597 // Constant found in model.operandValues
598 valueStart = &model.operandValues[operand.location.offset];
599 break;
600 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100601 case HalOperandLifeTime::CONSTANT_REFERENCE:
arovir01b0717b52018-09-05 17:03:25 +0100602 {
603 // Constant specified via a Memory object
604 valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
605 break;
606 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100607 case HalOperandLifeTime::NO_VALUE:
Kevin Mayf29a2c52019-03-14 11:56:32 +0000608 {
609 // An optional input tensor with no values is not an error so should not register as a fail
610 if (optional)
611 {
612 valueStart = nullptr;
613 break;
614 }
Matthew Bentham912b3622019-05-03 15:49:14 +0100615 [[fallthrough]];
Kevin Mayf29a2c52019-03-14 11:56:32 +0000616 }
arovir01b0717b52018-09-05 17:03:25 +0100617 default:
618 {
619 // Unsupported/invalid (e.g. can't get value of an input to the model)
620 Fail("%s: unsupported/invalid operand lifetime: %s",
621 __func__, toString(operand.lifetime).c_str());
622 valueStart = nullptr;
623 }
624 }
625
626 return valueStart;
627}
628
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100629template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100630 typename HalOperation = typename HalPolicy::Operation,
631 typename HalModel = typename HalPolicy::Model,
632 typename HalOperandType = typename HalPolicy::OperandType>
633bool GetOperandType(const HalOperation& operation,
634 uint32_t inputIndex,
635 const HalModel& model,
636 HalOperandType& type)
637{
638 using HalOperand = typename HalPolicy::Operand;
639
640 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
641 if (!operand)
642 {
643 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
644 }
645
646 type = operand->type;
647 return true;
648}
649
650template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100651 typename HalOperand = typename HalPolicy::Operand,
652 typename HalModel = typename HalPolicy::Model>
653ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
654 const HalModel& model,
655 const ConversionData& data,
656 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
657 const armnn::TensorShape* overrideTensorShape = nullptr,
658 bool optional = false)
659{
660 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
661
662 if (!IsOperandTypeSupportedForTensors(operand.type))
663 {
664 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
665 return ConstTensorPin();
666 }
667
668 if (!optional &&
669 operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
670 operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
671 operand.lifetime != HalOperandLifeTime::NO_VALUE)
672 {
673 Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
674 return ConstTensorPin();
675 }
676
677 const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
678 if (!valueStart)
679 {
680 if (optional)
681 {
682 // optional tensor with no values is not really an error; return it as invalid, but marked as optional
683 return ConstTensorPin(true);
684 }
685 // mandatory tensor with no values
686 Fail("%s: failed to get operand address", __func__);
687 return ConstTensorPin();
688 }
689
690 armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
691 if (overrideTensorShape != nullptr)
692 {
693 tensorInfo.SetShape(*overrideTensorShape);
694 }
695 return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
696}
697
698template<typename HalPolicy,
699 typename HalOperation = typename HalPolicy::Operation,
700 typename HalModel = typename HalPolicy::Model>
701ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
702 uint32_t inputIndex,
703 const HalModel& model,
704 const ConversionData& data,
705 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
706 const armnn::TensorShape* overrideTensorShape = nullptr,
707 bool optional = false)
708{
709 using HalOperand = typename HalPolicy::Operand;
710
711 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
712 if (!operand)
713 {
714 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
715 return ConstTensorPin();
716 }
717 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
718 model,
719 data,
720 dimensionMappings,
721 overrideTensorShape,
722 optional);
723}
724
725template<typename HalPolicy,
726 typename OutputType,
727 typename HalOperandType = typename HalPolicy::OperandType,
728 typename HalOperation = typename HalPolicy::Operation,
729 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100730bool GetInputScalar(const HalOperation& operation,
731 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100732 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100733 OutputType& outValue,
734 const HalModel& model,
735 const ConversionData& data)
736{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100737 using HalOperand = typename HalPolicy::Operand;
738
739 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100740 if (!operand)
741 {
742 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
743 }
744
745 if (operand->type != type)
746 {
747 return Fail("%s: unexpected operand type: %s (should be %s)",
748 __func__, toString(operand->type).c_str(), toString(type).c_str());
749 }
750
751 if (operand->location.length != sizeof(OutputType))
752 {
753 return Fail("%s: incorrect operand location length: %i (should be %i)",
754 __func__, operand->location.length, sizeof(OutputType));
755 }
756
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100757 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100758 if (!valueAddress)
759 {
760 return Fail("%s: failed to get address for operand", __func__);
761 }
762
763 outValue = *(static_cast<const OutputType*>(valueAddress));
764 return true;
765}
766
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100767template<typename HalPolicy,
768 typename HalOperation = typename HalPolicy::Operation,
769 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100770bool GetInputInt32(const HalOperation& operation,
771 uint32_t inputIndex,
772 int32_t& outValue,
773 const HalModel& model,
774 const ConversionData& data)
775{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100776 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100777}
778
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100779template<typename HalPolicy,
780 typename HalOperation = typename HalPolicy::Operation,
781 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100782bool GetInputFloat32(const HalOperation& operation,
783 uint32_t inputIndex,
784 float& outValue,
785 const HalModel& model,
786 const ConversionData& data)
787{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100788 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100789}
790
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100791template<typename HalPolicy,
792 typename HalOperation = typename HalPolicy::Operation,
793 typename HalOperandType = typename HalPolicy::OperandType,
794 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100795bool GetInputActivationFunctionImpl(const HalOperation& operation,
796 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100797 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100798 ActivationFn& outActivationFunction,
799 const HalModel& model,
800 const ConversionData& data)
801{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100802 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100803 {
804 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
805 __func__,
806 toString(type).c_str(),
807 toString(OperandType::INT32).c_str(),
808 toString(OperandType::TENSOR_INT32).c_str());
809 }
810
811 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100812 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100813 {
814 return Fail("%s: failed to get activation input value", __func__);
815 }
816 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
817 return true;
818}
819
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100820template<typename HalPolicy,
821 typename HalOperation = typename HalPolicy::Operation,
822 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100823bool GetInputActivationFunction(const HalOperation& operation,
824 uint32_t inputIndex,
825 ActivationFn& outActivationFunction,
826 const HalModel& model,
827 const ConversionData& data)
828{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100829 return GetInputActivationFunctionImpl<HalPolicy>(operation,
830 inputIndex,
831 HalPolicy::OperandType::INT32,
832 outActivationFunction,
833 model,
834 data);
arovir01b0717b52018-09-05 17:03:25 +0100835}
836
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100837template<typename HalPolicy,
838 typename HalOperation = typename HalPolicy::Operation,
839 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100840bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
841 uint32_t inputIndex,
842 ActivationFn& outActivationFunction,
843 const HalModel& model,
844 const ConversionData& data)
845{
846 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100847 return GetInputActivationFunctionImpl<HalPolicy>(operation,
848 inputIndex,
849 HalPolicy::OperandType::INT32,
850 outActivationFunction,
851 model,
852 data);
arovir01b0717b52018-09-05 17:03:25 +0100853}
854
855
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100856template<typename HalPolicy,
857 typename HalOperation = typename HalPolicy::Operation,
858 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100859bool GetOptionalInputActivation(const HalOperation& operation,
860 uint32_t inputIndex,
861 ActivationFn& activationFunction,
862 const HalModel& model,
863 const ConversionData& data)
864{
865 if (operation.inputs.size() <= inputIndex)
866 {
867 activationFunction = ActivationFn::kActivationNone;
868 }
869 else
870 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100871 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100872 {
873 return Fail("%s: Operation has invalid inputs", __func__);
874 }
875 }
876 return true;
877}
878
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100879template<typename HalPolicy,
880 typename ConvolutionDescriptor,
881 typename HalOperation = typename HalPolicy::Operation,
882 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100883bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
884 uint32_t dilationXIndex,
885 ConvolutionDescriptor& descriptor,
886 const HalModel& model,
887 const ConversionData& data)
888{
889 bool success = true;
890 if (operation.inputs.size() >= dilationXIndex + 2)
891 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100892 success &= GetInputScalar<HalPolicy>(operation,
893 dilationXIndex,
894 HalPolicy::OperandType::INT32,
895 descriptor.m_DilationX,
896 model,
897 data);
898 success &= GetInputScalar<HalPolicy>(operation,
899 dilationXIndex + 1,
900 HalPolicy::OperandType::INT32,
901 descriptor.m_DilationY,
902 model,
903 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100904 }
905
906 return success;
907}
908
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100909template<typename HalPolicy,
910 typename HalOperand = typename HalPolicy::Operand,
911 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100912bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100913 std::vector<int32_t>& outValues,
914 const HalModel& model,
915 const ConversionData& data)
916{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100917 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100918 {
919 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
920 }
921
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100922 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100923 if (!startAddress)
924 {
925 return Fail("%s: failed to get operand address", __func__, operand.type);
926 }
927
928 // Check number of bytes is sensible
929 const uint32_t numBytes = operand.location.length;
930 if (numBytes % sizeof(int32_t) != 0)
931 {
932 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
933 __func__, numBytes, sizeof(int32_t));
934 }
935
936 outValues.resize(numBytes / sizeof(int32_t));
937 memcpy(outValues.data(), startAddress, numBytes);
938 return true;
939}
940
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100941template<typename HalPolicy,
942 typename HalOperation = typename HalPolicy::Operation,
943 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100944bool GetInputPaddingScheme(const HalOperation& operation,
945 uint32_t inputIndex,
946 PaddingScheme& outPaddingScheme,
947 const HalModel& model,
948 const ConversionData& data)
949{
950 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100951 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100952 {
953 return Fail("%s: failed to get padding scheme input value", __func__);
954 }
955
956 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
957 return true;
958}
959
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100960template<typename HalPolicy,
961 typename HalOperation = typename HalPolicy::Operation,
962 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100963LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
964 uint32_t inputIndex,
965 const HalModel& model,
966 ConversionData& data)
967{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100968 using HalOperand = typename HalPolicy::Operand;
Sadik Armagan44bcc022019-06-18 17:21:36 +0100969 using HalOperandType = typename HalPolicy::OperandType;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100970 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
971
972 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100973 if (!operand)
974 {
975 Fail("%s: failed to get input operand %i", __func__, inputIndex);
976 return LayerInputHandle();
977 }
978
979 if (!IsOperandTypeSupportedForTensors(operand->type))
980 {
981 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
982 return LayerInputHandle();
983 }
984
Sadik Armagan44bcc022019-06-18 17:21:36 +0100985 try
arovir01b0717b52018-09-05 17:03:25 +0100986 {
Sadik Armagan44bcc022019-06-18 17:21:36 +0100987 armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100988 if (IsDynamicTensor(operandTensorInfo))
989 {
990 Fail("%s: dynamic input tensors are not supported", __func__);
991 return LayerInputHandle();
992 }
arovir01b0717b52018-09-05 17:03:25 +0100993
Sadik Armagan44bcc022019-06-18 17:21:36 +0100994 switch (operand->lifetime)
arovir01b0717b52018-09-05 17:03:25 +0100995 {
Sadik Armagan44bcc022019-06-18 17:21:36 +0100996 case HalOperandLifeTime::MODEL_INPUT:
Aron Virginas-Tar000117b2019-07-25 16:24:49 +0100997 {
998 // NOTE: We must check whether we can support the input tensor on at least one
999 // of the provided backends; otherwise we cannot convert the operation
1000 bool isInputSupported = false;
1001 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1002 IsInputSupported,
1003 data.m_Backends,
1004 isInputSupported,
1005 operandTensorInfo);
1006
1007 if (!isInputSupported)
1008 {
1009 Fail("%s: unsupported input tensor", __func__);
1010 return LayerInputHandle();
1011 }
1012
1013 BOOST_FALLTHROUGH; // intentional fallthrough
1014 }
1015 case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
Sadik Armagan44bcc022019-06-18 17:21:36 +01001016 case HalOperandLifeTime::MODEL_OUTPUT:
arovir01b0717b52018-09-05 17:03:25 +01001017 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001018 // The tensor is either an operand internal to the model, or a model input.
1019 // It can be associated with an ArmNN output slot for an existing layer.
1020
1021 // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
1022 const uint32_t operandIndex = operation.inputs[inputIndex];
1023 return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
Sadik Armagan44bcc022019-06-18 17:21:36 +01001024 }
Aron Virginas-Tar000117b2019-07-25 16:24:49 +01001025 case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
Sadik Armagan44bcc022019-06-18 17:21:36 +01001026 case HalOperandLifeTime::CONSTANT_REFERENCE:
1027 {
1028 // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
1029 ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
1030 if (tensorPin.IsValid())
arovir01b0717b52018-09-05 17:03:25 +01001031 {
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001032 bool isSupported = false;
1033 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1034 IsConstantSupported,
1035 data.m_Backends,
1036 isSupported,
1037 tensorPin.GetConstTensor().GetInfo());
Aron Virginas-Tara5e2a452019-07-29 16:13:19 +01001038 if (isSupported)
Sadik Armagan44bcc022019-06-18 17:21:36 +01001039 {
1040 return LayerInputHandle();
1041 }
1042
1043 armnn::IConnectableLayer* constantLayer =
1044 data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
1045 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
1046 outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
1047
1048 return LayerInputHandle(true, &outputSlot, operandTensorInfo);
1049 }
1050 else
1051 {
1052 Fail("%s: invalid operand tensor", __func__);
arovir01b0717b52018-09-05 17:03:25 +01001053 return LayerInputHandle();
1054 }
Sadik Armagan44bcc022019-06-18 17:21:36 +01001055 break;
arovir01b0717b52018-09-05 17:03:25 +01001056 }
Sadik Armagan44bcc022019-06-18 17:21:36 +01001057 default:
arovir01b0717b52018-09-05 17:03:25 +01001058 {
Sadik Armagan44bcc022019-06-18 17:21:36 +01001059 // Unsupported lifetime for an input tensor
1060 Fail("%s: unsupported lifetime for input tensor: %s",
1061 __func__, toString(operand->lifetime).c_str());
arovir01b0717b52018-09-05 17:03:25 +01001062 return LayerInputHandle();
1063 }
arovir01b0717b52018-09-05 17:03:25 +01001064 }
Sadik Armagan44bcc022019-06-18 17:21:36 +01001065 }
1066 catch (UnsupportedOperand<HalOperandType>& e)
1067 {
1068 Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
1069 return LayerInputHandle();
arovir01b0717b52018-09-05 17:03:25 +01001070 }
1071}
1072
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001073template<typename HalPolicy,
1074 typename HalOperation = typename HalPolicy::Operation,
1075 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001076bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1077 uint32_t operationOutputIndex,
1078 armnn::IConnectableLayer& layer,
1079 uint32_t layerOutputIndex,
1080 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001081 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001082{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001083 using HalOperand = typename HalPolicy::Operand;
1084
1085 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001086 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1087 {
1088 return false;
1089 }
1090
1091 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1092
1093 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1094 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1095
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001096 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
Mike Kellyb5fdf382019-06-11 16:35:25 +01001097
1098 return true;
1099}
1100
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001101template<typename HalPolicy,
1102 typename HalOperation = typename HalPolicy::Operation,
1103 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001104armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1105 uint32_t inputIndex,
1106 const HalModel& model,
1107 ConversionData& data)
1108{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001109 using HalOperand = typename HalPolicy::Operand;
1110
1111 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001112 if (!operand)
1113 {
1114 return armnn::DataLayout::NHWC;
1115 }
1116
1117 if (!IsBool(*operand))
1118 {
1119 return armnn::DataLayout::NHWC;
1120 }
1121
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001122 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001123 if (!valueAddress)
1124 {
1125 return armnn::DataLayout::NHWC;
1126 }
1127
1128 if (*(static_cast<const bool*>(valueAddress)))
1129 {
1130 return armnn::DataLayout::NCHW;
1131 }
1132 else
1133 {
1134 return armnn::DataLayout::NHWC;
1135 }
1136}
1137
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001138template<typename HalPolicy,
1139 typename HalOperation = typename HalPolicy::Operation,
1140 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001141bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1142 uint32_t outputIndex,
1143 armnn::IConnectableLayer& layer,
1144 const HalModel& model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001145 ConversionData& data)
Mike Kellyb5fdf382019-06-11 16:35:25 +01001146{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001147 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1148 outputIndex,
1149 layer,
1150 outputIndex,
1151 model,
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001152 data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001153}
1154
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001155template<typename HalPolicy,
1156 typename HalOperation = typename HalPolicy::Operation,
1157 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001158bool ConvertToActivation(const HalOperation& operation,
1159 const char* operationName,
1160 const armnn::ActivationDescriptor& activationDesc,
1161 const HalModel& model,
1162 ConversionData& data)
1163{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001164 using HalOperand = typename HalPolicy::Operand;
1165
1166 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001167 if (!input.IsValid())
1168 {
1169 return Fail("%s: Input 0 is invalid", operationName);
1170 }
1171
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001172 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001173 if (!outputOperand)
1174 {
1175 return false;
1176 }
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001177
1178 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Sadik Armagan2050c232019-07-23 16:59:58 +01001179 if (IsDynamicTensor(outInfo))
1180 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001181 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan2050c232019-07-23 16:59:58 +01001182 }
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001183
1184 bool isSupported = false;
1185 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1186 IsActivationSupported,
1187 data.m_Backends,
1188 isSupported,
1189 input.GetTensorInfo(),
1190 outInfo,
1191 activationDesc);
1192 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001193 {
1194 return false;
1195 }
1196
1197 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1198 BOOST_ASSERT(layer != nullptr);
1199 input.Connect(layer->GetInputSlot(0));
1200
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001201 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001202}
1203
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001204template<typename HalPolicy,
Sadik Armagan61113162019-07-25 09:09:40 +01001205 typename HalOperation = typename HalPolicy::Operation,
1206 typename HalModel = typename HalPolicy::Model>
1207bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
1208{
1209 armnn::ActivationDescriptor desc;
1210 desc.m_Function = armnn::ActivationFunction::ReLu;
1211
1212 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1213}
1214
1215template<typename HalPolicy,
1216 typename HalOperation = typename HalPolicy::Operation,
1217 typename HalModel = typename HalPolicy::Model>
1218bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
1219{
1220 armnn::ActivationDescriptor desc;
1221 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1222 desc.m_A = 1.0f;
1223 desc.m_B = -1.0f;
1224
1225 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1226}
1227
1228template<typename HalPolicy,
1229 typename HalOperation = typename HalPolicy::Operation,
1230 typename HalModel = typename HalPolicy::Model>
1231bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
1232{
1233 armnn::ActivationDescriptor desc;
1234 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1235 desc.m_A = 6.0f;
1236
1237 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1238}
1239
1240template<typename HalPolicy,
1241 typename HalOperation = typename HalPolicy::Operation,
1242 typename HalModel = typename HalPolicy::Model>
1243bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
1244{
1245 armnn::ActivationDescriptor desc;
1246 desc.m_Function = armnn::ActivationFunction::TanH;
1247 desc.m_A = 1.0f; // android nn does not support tanH parameters
1248 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1249
1250 return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
1251}
1252
1253template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001254 typename HalOperation = typename HalPolicy::Operation,
1255 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001256bool ConvertPaddings(const HalOperation& operation,
1257 const HalModel& model,
1258 ConversionData& data,
1259 unsigned int rank,
1260 armnn::PadDescriptor& padDescriptor)
1261{
1262 using HalOperand = typename HalPolicy::Operand;
1263
1264 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1265 if (!paddingsOperand)
1266 {
1267 return Fail("%s: Could not read paddings operand", __func__);
1268 }
1269
1270 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1271 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1272 {
1273 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1274 }
1275
1276 std::vector<int32_t> paddings;
1277 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1278
1279 // add padding for each dimension of input tensor.
1280 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1281 {
1282 int paddingBeforeInput = paddings[i];
1283 int paddingAfterInput = paddings[i + 1];
1284
1285 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1286 {
1287 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1288 }
1289
1290 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1291 }
1292
1293 return true;
1294}
1295
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts an NNAPI 2D pooling operation (max/average/L2, selected by poolType) into an
// ArmNN Pooling2dLayer followed by an optional fused activation. Supports both the
// explicit-padding signature (>= 10 inputs) and the implicit padding-scheme signature.
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be fully known up front; dynamic shapes cannot be converted.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // NN HAL 1.2 operands may carry a trailing optional data-layout flag at index 10.
        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // Indices [2]/[1] are width/height under the NHWC assumption set above.
        // NOTE(review): the implicit padding is computed from these NHWC dimensions BEFORE the
        // optional data-layout operand (index 7) is read below; if that operand selects NCHW,
        // the padding is derived from the wrong axes — confirm intended operand ordering.
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);

        if (Is12Operand(*output))
        {
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }
    }

    // Query backend support for the fully-populated descriptor before adding any layers.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer is the layer whose output is tracked.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1413
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001414template<typename HalPolicy,
Mike Kellyb8805202019-07-31 17:25:43 +01001415 typename Operation = typename HalPolicy::Operation,
1416 typename Model = typename HalPolicy::Model>
1417bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
1418{
1419 using HalOperand = typename HalPolicy::Operand;
1420 using HalOperandType = typename HalPolicy::OperandType;
1421
1422 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
1423 if (operation.inputs.size() <= 1)
1424 {
1425 return Fail("%s: Operation has insufficient arguments", __func__);
1426 }
1427
1428 // Get inputs and outputs
1429 const std::size_t numInputTensors = operation.inputs.size() - 1;
1430
1431 int32_t concatDim;
1432 if (!GetInputScalar<HalPolicy>(operation, numInputTensors, HalOperandType::INT32, concatDim, model, data))
1433 {
1434 return Fail("%s: Operation has invalid inputs", __func__);
1435 }
1436
1437 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
1438 if (!outputOperand)
1439 {
1440 return Fail("%s: Operation has no outputs", __func__);
1441 }
1442
1443
1444 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
1445 armnn::TensorShape outputShape = outputInfo.GetShape();
1446
1447 //
1448 // handle negative concat dims along the lines of tensorflow as described here:
1449 // https://www.tensorflow.org/api_docs/python/tf/concat
1450 // "negative axis refers to axis + rank(values)-th dimension"
1451 //
1452 if (concatDim < 0)
1453 {
1454 concatDim += outputShape.GetNumDimensions();
1455 }
1456
1457 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
1458 {
1459 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
1460 }
1461
1462 std::vector<LayerInputHandle> inputHandles;
1463 std::vector<armnn::TensorShape> inputShapes;
1464
1465 inputHandles.reserve(numInputTensors);
1466 inputShapes.reserve(numInputTensors);
1467
1468 bool inputsHaveBeenReshaped = false;
1469 unsigned int tensorDimensionsAdded = 0;
1470
1471 for (uint32_t i = 0; i < numInputTensors; ++i)
1472 {
1473 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, i, model);
1474 if (!operand)
1475 {
1476 return Fail("%s: Operation has invalid inputs", __func__);
1477 }
1478
1479 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
1480 LayerInputHandle operandInputHandle =
1481 ConvertToLayerInputHandle<HalPolicy>(operation, i, model, data);
1482
1483 if (operandShape.GetNumDimensions() == 0)
1484 {
1485 return Fail("%s: Operands with rank 0 are not supported", __func__);
1486 }
1487
1488 if (RequiresReshape(operandShape))
1489 {
1490 inputsHaveBeenReshaped = true;
1491
1492 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
1493
1494 // Expand the tensor to three dimensions
1495 if (operandShape.GetNumDimensions() == 2)
1496 {
1497 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
1498 tensorDimensionsAdded = 1;
1499 }
1500 else
1501 {
1502 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
1503 tensorDimensionsAdded = 2;
1504 }
1505
1506 armnn::IConnectableLayer& newReshape = AddReshapeLayer(
1507 *data.m_Network,
1508 operandInputHandle,
1509 reshapeInfo
1510 );
1511
1512 // Point to the reshape operation rather then the input operation
1513 operandShape = reshapeInfo.GetShape();
1514 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
1515 }
1516
1517 inputShapes.emplace_back(operandShape);
1518 inputHandles.emplace_back(operandInputHandle);
1519
1520 if (!inputHandles.back().IsValid())
1521 {
1522 return Fail("%s: Operation has invalid inputs", __func__);
1523 }
1524 }
1525
1526 BOOST_ASSERT(inputShapes.size() == inputHandles.size());
1527
1528 if (inputsHaveBeenReshaped)
1529 {
1530 // Adjust the concatenation dimension by the amount of dimensions added (if any)
1531 concatDim += tensorDimensionsAdded;
1532
1533 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
1534 if (tensorDimensionsAdded == 1)
1535 {
1536 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
1537 }
1538 else if (tensorDimensionsAdded == 2)
1539 {
1540 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
1541 }
1542 }
1543
1544 // Check if permutations is required and get the pair of permutations required for the concatenation.
1545 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
1546 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
1547 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
1548
1549 bool needPermute =
1550 CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
1551
1552 if (needPermute)
1553 {
1554 outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
1555 }
1556
1557 outputInfo.SetShape(outputShape);
1558
1559 // this is no-op for identity swizzles, otherwise it replaces both
1560 // the handles and shapes with the swizzled layer output handles and shapes
1561 SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);
1562
1563 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
1564 armnn::OriginsDescriptor concatDescriptor;
1565
1566 try
1567 {
1568 // The concat descriptor is always created across the only supported concat dimension
1569 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
1570 concatDescriptor =
1571 armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
1572 }
1573 catch (const armnn::Exception& error)
1574 {
1575 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
1576 }
1577
1578 // Validate the output shape is correct given the input shapes based on the
1579 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
1580 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
1581 {
1582 return Fail("%s: Error validating the output shape for concat", __func__);
1583 }
1584
1585 std::vector<const armnn::TensorInfo*> inputTensorInfos;
1586 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
1587 [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
1588
1589 bool isSupported = false;
1590 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1591 IsConcatSupported,
1592 data.m_Backends,
1593 isSupported,
1594 inputTensorInfos,
1595 outputInfo,
1596 concatDescriptor);
1597 if (!isSupported)
1598 {
1599 return false;
1600 }
1601
1602 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
1603 assert(layer != nullptr);
1604 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1605
1606 // Connect inputs to the layer
1607 const int numInputSlots = layer->GetNumInputSlots();
1608 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
1609 for (int i = 0; i < numInputSlots; ++i)
1610 {
1611 // connect the input directly to the merge (concat) layer
1612 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
1613 }
1614
1615 if (needPermute)
1616 {
1617 // Add permutation layer and connect the output to it, the permutation becomes the output layer
1618 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
1619 layer->GetOutputSlot(0),
1620 permutationPair.second);
1621 layer = &deswizzleLayer;
1622 }
1623
1624 if (inputsHaveBeenReshaped)
1625 {
1626 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
1627
1628 // Undo the reshape knowing the amount of dimensions added
1629 if (tensorDimensionsAdded == 1)
1630 {
1631 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
1632 afterConcatInfo.GetShape()[2] }));
1633 }
1634 else if (tensorDimensionsAdded == 2)
1635 {
1636 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
1637 }
1638
1639 layer = &AddReshapeLayer(
1640 *data.m_Network,
1641 layer->GetOutputSlot(0),
1642 afterConcatInfo
1643 );
1644 }
1645
1646 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1647}
1648
/// Converts an Android NN CONV_2D operation into an ArmNN Convolution2d layer.
///
/// Handles both forms of the HAL operation:
///  - 10 inputs: explicit padding (left/right/top/bottom), strides and fused activation.
///  -  7 inputs: implicit padding scheme (SAME/VALID), strides and fused activation;
///    padding is derived from the input/kernel sizes via CalcPadding.
///
/// @param operation HAL operation to convert (inputs: data, weights, bias, params).
/// @param model     HAL model the operation belongs to.
/// @param data      Conversion state (network under construction, backends, mem pools).
/// @return true on success; false (via Fail) on invalid inputs, dynamic output shape,
///         or when no backend supports the layer.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // Input 0 is the tensor to be convolved.
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be fully known up front; shape inference is not done here.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Re-derive the bias quantization scale from input/weights scales where needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit padding form: inputs 3-6 are pad values, 7-8 strides, 9 fused activation.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit padding form: input 3 is the padding scheme, 4-5 strides, 6 fused activation.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel H/W taken from weights dims 1/2, input H/W from dims 1/2 (NHWC data);
        // assumes AndroidNN conv weights layout [ depthOut, H, W, depthIn ] — TODO confirm.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether this exact layer configuration is supported.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when activation is NONE.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1768
/// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
/// DepthwiseConvolution2d layer.
///
/// Weights arrive in the AndroidNN layout [ 1, H, W, I * M ] and are reshaped to
/// [ H, W, I, M ] (M derived from the channel counts), then permuted to the
/// [ M, I, H, W ] layout ArmNN expects. Handles both HAL forms:
///  - 11 inputs: explicit padding, strides, depthwise multiplier (input 9, implicit
///    in the weights shape and not read here) and fused activation at input 10.
///  -  8 inputs: implicit padding scheme, strides, multiplier (input 6, likewise
///    not read here) and fused activation at input 7.
///
/// @param operation HAL operation to convert.
/// @param model     HAL model the operation belongs to.
/// @param data      Conversion state (network under construction, backends, mem pools).
/// @return true on success; false (via Fail) otherwise.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    // Input 0 is the tensor to be convolved.
    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be fully known up front; shape inference is not done here.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    // I = input channels (inputInfo dim 3, NHWC); M = (I * M) / I, the depthwise multiplier.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Re-derive the bias quantization scale from input/weights scales where needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit padding form: inputs 3-6 pads, 7-8 strides, 10 fused activation.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit padding form: input 3 padding scheme, 4-5 strides, 7 fused activation.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are now [ M, I, H, W ] after the swizzle above, so H = dim 2, W = dim 3.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether this exact layer configuration is supported.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when activation is NONE.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1913
Mike Kelly3c673942019-07-25 09:26:06 +01001914template<typename HalPolicy,
1915 typename HalOperation = typename HalPolicy::Operation,
1916 typename HalOperand = typename HalPolicy::Operand,
1917 typename HalModel = typename HalPolicy::Model>
1918bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
1919{
Mike Kelly3c673942019-07-25 09:26:06 +01001920 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1921 if (!input.IsValid())
1922 {
1923 return Fail("%s: Operation has invalid inputs", __func__);
1924 }
1925
1926 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1927 unsigned int rank = inputInfo.GetNumDimensions();
1928
1929 armnn::PadDescriptor descriptor;
1930 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
1931 {
1932 return Fail("%s: Could not convert paddings", __func__);
1933 }
1934
1935 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
1936 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
1937 // (QuantizationOffset - QuantizationOffset) * scale = 0.
1938 if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
1939 {
1940 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
1941 }
1942
1943 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1944 if (!output)
1945 {
1946 return Fail("%s: Could not read output", __func__);
1947 }
1948
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001949 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Mike Kelly3c673942019-07-25 09:26:06 +01001950 if (IsDynamicTensor(outputInfo))
1951 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001952 return Fail("%s: Dynamic output tensors are not supported", __func__);
Mike Kelly3c673942019-07-25 09:26:06 +01001953 }
1954
1955 bool isSupported = false;
1956 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1957 IsPadSupported,
1958 data.m_Backends,
1959 isSupported,
1960 inputInfo,
1961 outputInfo,
1962 descriptor);
1963 if (!isSupported)
1964 {
1965 return false;
1966 }
1967
1968 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
1969 assert(layer != nullptr);
1970 input.Connect(layer->GetInputSlot(0));
1971 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1972
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001973 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +01001974}
1975
Mike Kelly0a879362019-07-29 16:56:31 +01001976template<typename HalPolicy,
1977 typename Operation = typename HalPolicy::Operation,
1978 typename Operand = typename HalPolicy::Operand,
1979 typename Model = typename HalPolicy::Model>
1980bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
1981{
1982 LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1983 LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);
1984
1985 if (!input0.IsValid() || !input1.IsValid())
1986 {
1987 return Fail("%s: Operation has invalid inputs", __func__);
1988 }
1989
1990 // The FuseActivation parameter is always the input index 2
1991 // and it should be optional
1992 ActivationFn activationFunction;
1993 if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
1994 {
1995 return Fail("%s: Operation has invalid inputs", __func__);
1996 }
1997
1998 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
1999 if (!output)
2000 {
2001 return Fail("%s: Could not read output 0", __func__);
2002 }
2003
2004 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2005 if (IsDynamicTensor(outputInfo))
2006 {
2007 return Fail("%s: Dynamic output tensors are not supported", __func__);
2008 }
2009
2010 bool isSupported = false;
2011 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2012 IsSubtractionSupported,
2013 data.m_Backends,
2014 isSupported,
2015 input0.GetTensorInfo(),
2016 input1.GetTensorInfo(),
2017 outputInfo);
2018 if (!isSupported)
2019 {
2020 return false;
2021 }
2022
2023 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
2024 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
2025
2026 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
2027 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
2028
2029 if (endLayer)
2030 {
2031 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
2032 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
2033 }
2034
2035 return Fail("%s: ProcessActivation failed", __func__);
2036}
2037
Finn Williams23b87b32019-07-30 11:44:05 +01002038template<typename HalPolicy,
2039 typename HalOperation = typename HalPolicy::Operation,
Finn Williams0e4e4392019-07-31 10:56:27 +01002040 typename HalOperand = typename HalPolicy::Operand,
Finn Williams23b87b32019-07-30 11:44:05 +01002041 typename HalModel = typename HalPolicy::Model>
2042bool ConvertBatchToSpaceNd(const HalOperation& operation,
2043 const HalModel& model,
2044 ConversionData& data)
2045{
Finn Williams23b87b32019-07-30 11:44:05 +01002046
2047 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2048 if (!input.IsValid())
2049 {
2050 return Fail("%s: Operation has invalid inputs", __func__);
2051 }
2052
2053 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2054 if (!output)
2055 {
2056 return Fail("%s: Could not read output 0", __func__);
2057 }
2058
2059 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2060 if (IsDynamicTensor(outputInfo))
2061 {
2062 return Fail("%s: Dynamic output tensors are not supported", __func__);
2063 }
2064
2065 const HalOperand* blockOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2066 if (!blockOperand)
2067 {
2068 return Fail("%s: Could not read input 1", __func__);
2069 }
2070
2071 // Convert the block operand to int32
2072 std::vector<int32_t> block;
2073 if (!GetTensorInt32Values<HalPolicy>(*blockOperand, block, model, data))
2074 {
2075 return Fail("%s: Input 1 has invalid values", __func__);
2076 }
2077
2078 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2079
2080 unsigned int rank = inputInfo.GetNumDimensions();
2081 if (rank != 4)
2082 {
2083 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
2084 }
2085
2086 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
2087 {
2088 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
2089 " greater than or equal to 1", __func__);
2090 }
2091
2092 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
2093 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
2094 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
2095
2096 if (Is12Operand(*output))
2097 {
Finn Williams0e4e4392019-07-31 10:56:27 +01002098 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 2, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +01002099 }
2100 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
2101 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
2102
2103 bool isSupported = false;
2104 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2105 IsBatchToSpaceNdSupported,
2106 data.m_Backends,
2107 isSupported,
2108 inputInfo,
2109 outputInfo,
2110 batchToSpaceNdDesc);
2111 if (!isSupported)
2112 {
2113 return false;
2114 }
2115
2116 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
2117 assert(layer != nullptr);
2118 input.Connect(layer->GetInputSlot(0));
2119
2120 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2121}
Mike Kelly0a879362019-07-29 16:56:31 +01002122
Finn Williamsd74c5052019-07-30 17:06:00 +01002123template<typename HalPolicy,
2124 typename HalOperation = typename HalPolicy::Operation,
2125 typename HalOperand = typename HalPolicy::Operand,
2126 typename HalModel = typename HalPolicy::Model>
2127bool ConvertSpaceToBatchNd(const HalOperation& operation, const HalModel& model, ConversionData& data)
2128{
2129 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
2130 if (!input.IsValid())
2131 {
2132 return Fail("%s: Operation has invalid inputs", __func__);
2133 }
2134
2135 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2136 unsigned int rank = inputInfo.GetNumDimensions();
2137 unsigned int spatialDim = rank - 2;
2138
2139 if (rank != 4)
2140 {
2141 Fail("%s: Only inputs with rank 4 are supported", __func__);
2142 }
2143
2144 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
2145 if (!output)
2146 {
2147 return Fail("%s: Could not read output 0", __func__);
2148 }
2149
2150 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2151 if (IsDynamicTensor(outputInfo))
2152 {
2153 return Fail("%s: Dynamic output tensors are not supported", __func__);
2154 }
2155
2156 const HalOperand* blockShapeOperand = GetInputOperand<HalPolicy>(operation, 1, model);
2157 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 2, model);
2158
2159 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
2160 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
2161 {
2162 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
2163 }
2164
2165 std::vector<int32_t> blockShape;
2166 GetTensorInt32Values<HalPolicy>(*blockShapeOperand, blockShape, model, data);
2167 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
2168 {
2169 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
2170 }
2171
2172 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
2173 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
2174 {
2175 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
2176 }
2177
2178 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
2179 std::vector<int32_t> paddings;
2180 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
2181 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
2182 {
2183 int paddingBeforeInput = paddings[i];
2184 int paddingAfterInput = paddings[i + 1];
2185 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
2186 {
2187 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
2188 }
2189
2190 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
2191 }
2192
2193 armnn::SpaceToBatchNdDescriptor descriptor;
2194 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2195 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
2196 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
2197
2198 if (Is12Operand(*output))
2199 {
2200 descriptor.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 3, model, data);
2201 }
2202
2203 bool isSupported = false;
2204 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2205 IsSpaceToBatchNdSupported,
2206 data.m_Backends,
2207 isSupported,
2208 inputInfo,
2209 outputInfo,
2210 descriptor);
2211 if (!isSupported)
2212 {
2213 return false;
2214 }
2215
2216 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
2217 assert(layer != nullptr);
2218 input.Connect(layer->GetInputSlot(0));
2219
2220 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
2221}
2222
saoste01b8471482018-10-10 09:44:51 +01002223} // namespace armnn_driver