//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendHelper.hpp>

#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
#include "armnn/src/armnnUtils/Permute.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <boost/assert.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>

#include <log/log.h>
#include <vector>

namespace armnn_driver
{

///
/// Helper classes
///

struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors).
    // The optional flag can be set to indicate that the tensor values were missing, but the tensor is otherwise valid.
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
    std::string reasonIfUnsupported; \
    try { \
        for (auto&& backendId : backends) \
        { \
            auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
            if (layerSupportObject) \
            { \
                supported = \
                    layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
                if (supported) \
                { \
                    break; \
                } \
                else \
                { \
                    if (reasonIfUnsupported.size() > 0) \
                    { \
                        ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                    } \
                    else \
                    { \
                        ALOGD("%s: not supported by armnn", funcName); \
                    } \
                } \
            } \
            else \
            { \
                ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
            } \
        } \
        if (!supported) \
        { \
            ALOGD("%s: not supported by any specified backend", funcName); \
        } \
    } catch (const armnn::InvalidArgumentException &e) { \
        throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    }
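
// Typical use (an illustrative sketch): the macro declares "reasonIfUnsupported" at the
// point of expansion and writes the result into the "supported" argument, so it can only
// be expanded once per scope, and callers look like:
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsInputSupported,
//                                data.m_Backends,
//                                isSupported,
//                                operandTensorInfo);
//     if (!isSupported)
//     {
//         return false;
//     }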

template<typename Operand>
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32 ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#ifdef ARMNN_ANDROID_NN_V1_2

inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

inline bool Is12Operand(V1_0::Operand)
{
    return false;
}

#ifdef ARMNN_ANDROID_NN_V1_2

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

/// Checks if an operand is a HAL 1.2 Operand
inline bool Is12Operand(V1_2::Operand)
{
    return true;
}

#endif

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    BOOST_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer, armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return;
    }

    // Since the numbers of dimensions do not match, we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }
}
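
// Example (illustrative): broadcasting a constant of shape [4] against an input of
// shape [1, 2, 2, 4] reshapes the smaller tensor to [1, 1, 1, 4] before both are
// connected to startLayer, so the trailing dimensions line up.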

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
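
// For example (illustrative): input = 6, kernel = 3, stride = 2 with the SAME scheme
// gives an output size of ceil(6 / 2) = 3, which requires (3 - 1) * 2 + 3 = 7 input
// elements, so one element of padding is needed: outPadHead = 0, outPadTail = 1.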

#ifdef ARMNN_ANDROID_NN_V1_2

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

#endif

Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the user
// (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}
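
// Example (illustrative): with an input scale of 0.5f and a weight scale of 0.25f the
// expected bias scale is 0.125f; a bias scale of 0.1251f falls within the 1% tolerance
// and is snapped to 0.125f, while a bias scale of 0.2f is left unchanged for ArmNN's
// own validation to reject.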

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
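
// Note (illustrative): each PermutationVector entry gives the destination dimension of
// the corresponding source dimension, so permuting an NHWC shape [1, 16, 16, 3] with
// NHWCToArmNN({ 0, 2, 3, 1 }) yields the NCHW shape [1, 3, 16, 16]; ArmNNToNHWC is the
// inverse mapping.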

template<typename OSlot>
armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
                                          const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);

    BOOST_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}

armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}

// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}

// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
                               const armnn::TensorShape & outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                        "%s: Invalid output shape for dimension %d (%d != %d)",
                        __func__,
                        i,
                        outputShape[i],
                        outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}
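
// Example (illustrative): concatenating input shapes [2, 4] and [3, 4] along
// concatDim = 0 is only valid for an output shape of [5, 4]; all dimensions other
// than concatDim must match the inputs exactly.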

bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;
    BOOST_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    return needPermute;
}
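
// Example (illustrative): to concatenate 4-D tensors along dimension 2, callers apply
// permutationPair.first (SwapDim1And2) to the inputs, concatenate along the rewritten
// dimension 1, and apply permutationPair.second to restore the original layout.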

} // anonymous namespace

namespace armnn_driver
{

/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed-in AndroidNN activation function requires it.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function, or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}
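
// Typical use (an illustrative sketch): pulling the constant weights and bias out of a
// convolution operation during conversion, as done by ConvertConv2d below:
//
//     const ConstTensorPin weightsPin =
//         ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
//     const ConstTensorPin biasPin =
//         ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
//
// An invalid pin that reports IsOptional() means the operand was legitimately omitted.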

template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    outValue = *(static_cast<const OutputType*>(valueAddress));
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}


template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalPolicy,
         typename ConvolutionDescriptor,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationX,
                                             model,
                                             data);
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex + 1,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationY,
                                             model,
                                             data);
    }

    return success;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool GetTensorInt32Values(const HalOperand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__, operand.type);
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
        if (IsDynamicTensor(operandTensorInfo))
        {
            Fail("%s: dynamic input tensors are not supported", __func__);
            return LayerInputHandle();
        }

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::MODEL_INPUT:
            {
                // NOTE: We must check whether we can support the input tensor on at least one
                // of the provided backends; otherwise we cannot convert the operation
                bool isInputSupported = false;
                FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                           IsInputSupported,
                                           data.m_Backends,
                                           isInputSupported,
                                           operandTensorInfo);

                if (!isInputSupported)
                {
                    Fail("%s: unsupported input tensor", __func__);
                    return LayerInputHandle();
                }

                BOOST_FALLTHROUGH; // intentional fallthrough
            }
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY: // intentional fallthrough
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
    data.m_OutputSlotForOperand[operandIndex] = &outputSlot;

    outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));

    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
                                     uint32_t inputIndex,
                                     const HalModel& model,
                                     ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return armnn::DataLayout::NHWC;
    }

    if (!IsBool(*operand))
    {
        return armnn::DataLayout::NHWC;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return armnn::DataLayout::NHWC;
    }

    if (*(static_cast<const bool*>(valueAddress)))
    {
        return armnn::DataLayout::NCHW;
    }
    else
    {
        return armnn::DataLayout::NHWC;
    }
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data)
{
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                   outputIndex,
                                                   layer,
                                                   outputIndex,
                                                   model,
                                                   data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu1(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 1.0f;
    desc.m_B = -1.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertReLu6(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 6.0f;

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertTanH(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::TanH;
    desc.m_A = 1.0f; // android nn does not support tanH parameters
    desc.m_B = 1.0f; // set to 1.0f for unity scaling

    return ConvertToActivation<HalPolicy>(operation, __func__, desc, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPaddings(const HalOperation& operation,
                     const HalModel& model,
                     ConversionData& data,
                     unsigned int rank,
                     armnn::PadDescriptor& padDescriptor)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);

    // add padding for each dimension of input tensor.
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];

        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    return true;
}
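
// Example (illustrative): for a rank-2 input, a paddings operand holding
// { 1, 1, 2, 2 } (shape [2, 2]) yields m_PadList { {1, 1}, {2, 2} }, i.e. one row of
// padding on each side of dimension 0 and two columns on each side of dimension 1.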
1295
1296template<typename HalPolicy,
1297 typename HalOperation = typename HalPolicy::Operation,
1298 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001299bool ConvertPooling2d(const HalOperation& operation,
1300 const char* operationName,
1301 armnn::PoolingAlgorithm poolType,
1302 const HalModel& model,
1303 ConversionData& data)
1304{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001305 using HalOperand = typename HalPolicy::Operand;
1306 using HalOperandType = typename HalPolicy::OperandType;
1307
1308 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001309 if (!input.IsValid())
1310 {
1311 return Fail("%s: Could not read input 0", operationName);
1312 }
1313
    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    auto inputSize = operation.inputs.size();

    if (inputSize >= 10)
    {
        // Explicit padding: one input tensor and nine scalar parameters (padding left/right/top/bottom,
        // stride x/y, filter width/height and the fused activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        if (Is12Operand(*output))
        {
            // HAL 1.2 adds an optional data layout parameter (NCHW/NHWC) as input 10
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
        }
    }
    else
    {
        // Implicit padding: one input tensor and six scalar parameters (padding scheme,
        // stride x/y, filter width/height and the fused activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        const unsigned int inputWidth  = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);

        if (Is12Operand(*output))
        {
            // HAL 1.2 adds an optional data layout parameter (NCHW/NHWC) as input 7
            desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
        }
    }
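
    // Worked example for the implicit-padding branch above (illustrative numbers only):
    // with SAME padding, an input extent of 224, pool size 3 and stride 2, the Android NN
    // rule gives output = ceil(224 / 2) = 112 and needed input = (112 - 1) * 2 + 3 = 225,
    // so CalcPadding produces a total padding of 1, split as pad-before 0 and pad-after 1.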

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}

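// Illustrative sketch only (not part of this file): a HAL policy implementation would
// typically dispatch to the pooling helper above along these lines; the enum and namespace
// names here are hypothetical and depend on the HAL version being built:
//
//     case V1_0::OperationType::MAX_POOL_2D:
//         return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__,
//                                                     armnn::PoolingAlgorithm::Max,
//                                                     model, data);
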
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
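    // For quantized tensors the Android NN convention is a bias of type INT32 with zero point 0
    // and a scale expected to equal inputScale * weightScale; SanitizeBiasQuantizationScale
    // below corrects bias infos whose scale deviates slightly from that product.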
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit padding: pad left/right/top/bottom, stride x/y and the fused activation type
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit padding: a padding scheme, stride x/y and the fused activation type
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

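        // Android NN CONV_2D weights are laid out as [ depth_out, filter_h, filter_w, depth_in ]
        // and the input tensor as NHWC, hence the filter width sits at index 2 and the height
        // at index 1 below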
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}

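// Note: ProcessActivation appends an ArmNN activation layer when the HAL operation carries a
// fused activation (e.g. RELU/RELU1/RELU6) and returns the final layer, or returns the layer
// passed in when no activation is fused; hence outputs are tracked against endLayer above.
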
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Reinterpret weight data as [ H, W, I, M ]
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
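
    // Worked example (illustrative numbers only): a HAL weights tensor of shape [ 1, 3, 3, 32 ]
    // on an input with 16 channels implies a depth multiplier M = 32 / 16 = 2; the data is
    // reinterpreted as [ H, W, I, M ] = [ 3, 3, 16, 2 ] and then permuted to
    // [ M, I, H, W ] = [ 2, 16, 3, 3 ], the layout the ArmNN depthwise layer expects.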

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        // Explicit padding: pad left/right/top/bottom and stride x/y. Input 9 is the depth
        // multiplier, which is already implied by the weight and input shapes above, so only
        // the fused activation at input 10 is read here.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        // Implicit padding: a padding scheme and stride x/y. Input 6 is the depth multiplier
        // (again implied by the shapes), with the fused activation at input 7.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // The weights were permuted to [ M, I, H, W ], so the filter width is at index 3
        // and the filter height at index 2
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}

1680 typename HalOperation = typename HalPolicy::Operation,
1681 typename HalOperand = typename HalPolicy::Operand,
1682 typename HalModel = typename HalPolicy::Model>
1683bool ConvertPad(HalOperation& operation, const HalModel& model, ConversionData& data)
1684{
Mike Kelly3c673942019-07-25 09:26:06 +01001685 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
1686 if (!input.IsValid())
1687 {
1688 return Fail("%s: Operation has invalid inputs", __func__);
1689 }
1690
1691 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1692 unsigned int rank = inputInfo.GetNumDimensions();
1693
1694 armnn::PadDescriptor descriptor;
1695 if (!ConvertPaddings<HalPolicy>(operation, model, data, rank, descriptor))
1696 {
1697 return Fail("%s: Could not convert paddings", __func__);
1698 }
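
    // For reference: the PAD paddings operand (input 1) is an INT32 tensor of shape [ rank, 2 ],
    // where paddings[i, 0] is the amount prepended and paddings[i, 1] the amount appended to
    // dimension i; ConvertPaddings translates this into the descriptor's pad list.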

    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q
    // the pad value must be "logical zero", so we set it equal to the QuantizationOffset, which ends up as
    // (QuantizationOffset - QuantizationOffset) * scale = 0 once dequantized.
    if (inputInfo.GetDataType() == armnn::DataType::QuantisedAsymm8)
    {
        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
    }
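
    // Numeric example (illustrative only): with scale 0.5 and zero point 128, a raw pad
    // value of 128 dequantizes to (128 - 128) * 0.5 = 0.0f, i.e. a true zero pad.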

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    if (!layer)
    {
        return Fail("%s: AddPadLayer failed", __func__);
    }
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

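// Shape example (illustrative numbers only): padding a [ 1, 2, 2, 1 ] input with
// paddings { {0, 0}, {1, 1}, {1, 1}, {0, 0} } yields a [ 1, 4, 4, 1 ] output.
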
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertSub(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input at index 2, and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSubtractionSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
    }

    return Fail("%s: ProcessActivation failed", __func__);
}
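
// Note: unlike the convolution and pooling converters above, binary operations such as SUB
// connect their inputs through BroadcastTensor, which (in this driver) reshapes the lower-rank
// input so the two operands broadcast against each other before reaching the subtraction layer.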

} // namespace armnn_driver