//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>
#include <armnn/ILayerSupport.hpp>
#include <armnn/BackendHelper.hpp>

#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
#include "armnn/src/armnnUtils/Permute.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <boost/assert.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>

#include <log/log.h>
#include <vector>

namespace armnn_driver
{

///
/// Helper classes
///

struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // Optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience macro to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, isSupported, a, b, c, d, e)
#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
    std::string reasonIfUnsupported; \
    try { \
        for (auto&& backendId : backends) \
        { \
            auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
            if (layerSupportObject) \
            { \
                supported = \
                    layerSupportObject->func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
                if (supported) \
                { \
                    break; \
                } \
                else \
                { \
                    if (reasonIfUnsupported.size() > 0) \
                    { \
                        ALOGD("%s: not supported by armnn: %s", funcName, reasonIfUnsupported.c_str()); \
                    } \
                    else \
                    { \
                        ALOGD("%s: not supported by armnn", funcName); \
                    } \
                } \
            } \
            else \
            { \
                ALOGD("%s: backend not registered: %s", funcName, backendId.Get().c_str()); \
            } \
        } \
        if (!supported) \
        { \
            ALOGD("%s: not supported by any specified backend", funcName); \
        } \
    } catch (const armnn::InvalidArgumentException &e) { \
        throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
    }

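// For illustration only (not part of the driver): a typical invocation of the macro above.
// The macro expands in place, tries each backend in order, and writes its verdict into the
// caller-provided `supported` variable; the operand names below (inputInfo, outputInfo, desc)
// are hypothetical stand-ins for whatever the caller has in scope.
//
//     bool isSupported = false;
//     FORWARD_LAYER_SUPPORT_FUNC(__func__,
//                                IsActivationSupported,
//                                data.m_Backends,
//                                isSupported,
//                                inputInfo,
//                                outputInfo,
//                                desc);
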
template<typename Operand>
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32 ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#ifdef ARMNN_ANDROID_NN_V1_2

inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

#ifdef ARMNN_ANDROID_NN_V1_2

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

#endif

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    BOOST_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer, armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return;
    }

    // Since the numbers of dimensions do not match, we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0  Input1
        //     |      |
        //  Reshape   |
        //      \    /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0  Input1
        //     |      |
        //     |   Reshape
        //      \    /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }
}

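// Illustrative walk-through (not part of the driver): broadcasting a bias of shape [4] against
// an input of shape [1, 2, 2, 4]. The shapes differ by three dimensions, so the "smaller" tensor
// is reshaped to [1, 1, 1, 4] (degenerate leading dimensions) before both tensors are connected
// to the two-input start layer; the backend then performs the element-wise broadcast.
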
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

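// Worked example (illustrative, following the usual NNAPI SAME-padding arithmetic in
// calculateExplicitPadding): input = 224, kernel = 3, stride = 2, scheme = SAME.
// The output size is ceil(224 / 2) = 112, the input extent needed to produce it is
// (112 - 1) * 2 + 3 = 225, so the total padding is 225 - 224 = 1, split as
// outPadHead = 0 and outPadTail = 1. With scheme = VALID both come out as 0.
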
#ifdef ARMNN_ANDROID_NN_V1_2

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

#endif

Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}

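// Numeric example (illustrative): with an input scale of 0.5 and a weight scale of 0.25 the
// expected bias scale is 0.5 * 0.25 = 0.125. A model that declares the bias scale as 0.1251
// is within the 1% tolerance above and is silently corrected to 0.125; a bias scale of 0.2
// is outside the tolerance and is left untouched (and will likely be rejected later).
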
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });

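// Reading the mappings (illustrative, following armnnUtils::Permuted's convention): entry
// mappings[i] names the destination dimension for source dimension i. For
// NHWCToArmNN({ 0, 2, 3, 1 }), N stays at 0, H moves to 2, W moves to 3 and C moves to 1,
// so an NHWC shape [1, 224, 224, 3] becomes the NCHW shape [1, 3, 224, 224];
// ArmNNToNHWC({ 0, 3, 1, 2 }) is the inverse mapping.
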
template<typename OSlot>
armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
                                          const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);

    BOOST_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}

armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}

// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}

// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
                               const armnn::TensorShape & outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}

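// Shape check example (illustrative): concatenating inputs [1, 2, 3] and [1, 5, 3] along
// concatDim = 1 must produce the output shape [1, 7, 3]; dimension 1 is the sum 2 + 5, and
// every other dimension must match the first input exactly.
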
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;
    BOOST_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    return needPermute;
}

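// Walk-through (illustrative): for a 4-D concatenation along dimension 2, the inputs are first
// permuted with SwapDim1And2 so the data to be joined lands on dimension 1, the concatenation
// runs on dimension 1, and the result is permuted back with SwapDim1And2 (which is its own
// inverse). The 3-D case rotates the tensor left, concatenates along dimension 0, then rotates
// right to restore the original layout.
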
} // anonymous namespace

namespace armnn_driver
{

/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires it.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}

template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    outValue = *(static_cast<const OutputType*>(valueAddress));
    return true;
}

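// Usage sketch (illustrative): reading a convolution stride, where the input index 7 and the
// descriptor name `desc` are hypothetical stand-ins for the caller's context:
//
//     if (!GetInputScalar<HalPolicy>(operation, 7, HalPolicy::OperandType::INT32,
//                                    desc.m_StrideX, model, data))
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }
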
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalPolicy,
         typename ConvolutionDescriptor,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationX,
                                             model,
                                             data);
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex + 1,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationY,
                                             model,
                                             data);
    }

    return success;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool GetTensorInt32Values(const HalOperand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__);
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_INPUT:
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
                break;
            }
            case HalOperandLifeTime::CONSTANT_COPY:
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    bool isSupported = false;
                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                               IsConstantSupported,
                                               data.m_Backends,
                                               isSupported,
                                               tensorPin.GetConstTensor().GetInfo());
                    if (!isSupported)
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::Optional<armnn::TensorInfo>& outputInfo = armnn::EmptyOptional())
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
    data.m_OutputSlotForOperand[operandIndex] = &outputSlot;

    if (outputInfo.has_value())
    {
        outputSlot.SetTensorInfo(outputInfo.value());
        ALOGD("Output info overwritten");
    }
    else
    {
        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
    }

    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
                                     uint32_t inputIndex,
                                     const HalModel& model,
                                     ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return armnn::DataLayout::NHWC;
    }

    if (!IsBool(*operand))
    {
        return armnn::DataLayout::NHWC;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return armnn::DataLayout::NHWC;
    }

    if (*(static_cast<const bool*>(valueAddress)))
    {
        return armnn::DataLayout::NCHW;
    }
    else
    {
        return armnn::DataLayout::NHWC;
    }
}

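// Behaviour summary (illustrative): OptionalDataLayout defaults to NHWC whenever the optional
// BOOL operand is absent, of the wrong type, or unreadable; only an explicit true selects NCHW.
// This is how ConvertConv2d below consumes its optional layout flag (input 10 in the
// explicit-padding form, input 7 in the implicit-padding form).
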
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data,
                                  const armnn::Optional<armnn::TensorInfo>& outputInfo = armnn::EmptyOptional())
{
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
                                                   outputIndex,
                                                   layer,
                                                   outputIndex,
                                                   model,
                                                   data,
                                                   outputInfo);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }
    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsActivationSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outInfo,
                               activationDesc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPaddings(const HalOperation& operation,
                     const HalModel& model,
                     ConversionData& data,
                     unsigned int rank,
                     armnn::PadDescriptor& padDescriptor)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has invalid or unreadable paddings operand", __func__);
    }

    // Add padding for each dimension of the input tensor.
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];

        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    return true;
}

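// Paddings layout example (illustrative): for a rank-2 input, a paddings tensor of shape [2, 2]
// holding { 0, 0, 1, 2 } means "no padding on dimension 0, one element before and two after on
// dimension 1", producing m_PadList = { {0, 0}, {1, 2} }.
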
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        const unsigned int inputWidth  = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPooling2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}

Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001321template<typename HalPolicy,
1322 typename HalOperation = typename HalPolicy::Operation,
1323 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001324bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
1325{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001326 using HalOperand = typename HalPolicy::Operand;
1327 using HalOperandType = typename HalPolicy::OperandType;
1328
1329 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001330 if (!input.IsValid())
1331 {
1332 return Fail("%s: Operation has invalid inputs", __func__);
1333 }
1334
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001335 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001336 if (!output)
1337 {
1338 return Fail("%s: Could not read output 0", __func__);
1339 }
1340
1341 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1342 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1343
1344 // ArmNN does not currently support non-fixed weights or bias
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001345 const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
1346 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001347
1348 if (!weightsPin.IsValid() || !biasPin.IsValid())
1349 {
1350 return Fail("%s: Operation has invalid inputs", __func__);
1351 }
1352
1353 armnn::ConstTensor weights = weightsPin.GetConstTensor();
1354 armnn::ConstTensor bias = biasPin.GetConstTensor();
1355 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
1356
1357 armnn::Convolution2dDescriptor desc;
1358 desc.m_DataLayout = armnn::DataLayout::NHWC;
1359 ActivationFn activation;
1360
    if (operation.inputs.size() >= 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
    }
    else if (operation.inputs.size() >= 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

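        // NNAPI convolution weights are laid out as [ depth_out, H, W, depth_in ],
        // so index 1 holds the kernel height and index 2 the kernel width. The
        // input indices below assume the NHWC default set above.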
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

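    // Check that at least one of the requested backends supports the convolution
    // as parameterised above before committing it to the network.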
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

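    // ProcessActivation handles the fused NNAPI activation (RELU, RELU1, RELU6, ...):
    // where an activation is requested it is expected to append an activation layer
    // and return it; otherwise the convolution layer itself becomes the end layer.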
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

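    // The optional data layout has to be resolved before the weights are
    // unpacked, because the input channel count (I) is read from the input
    // shape at the layout-dependent channels index below.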
    // Look ahead to find the optional DataLayout, if present
    if (operation.inputs.size() >= 12)
    {
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 11, model, data);
    }
    else if (operation.inputs.size() >= 9)
    {
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 8, model, data);
    }

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

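    // Worked example (assumed values): with 16 input channels and a depth
    // multiplier of 2, NNAPI weights of shape [ 1, 3, 3, 32 ] are reinterpreted
    // as [ 3, 3, 16, 2 ] (H, W, I, M) and then permuted to [ 2, 16, 3, 3 ]
    // (M, I, H, W), the layout ArmNN expects for depthwise weights.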
    // Reinterpret weight data as [ H, W, I, M ]
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

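    // DEPTHWISE_CONV_2D likewise has explicit-padding (11+ inputs) and
    // implicit-padding (8+ inputs) forms. The depth multiplier operand (input 9
    // or 6 respectively) is not read here, as it is already implied by the
    // reshaped weights above.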
    if (operation.inputs.size() >= 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() >= 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

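        // The weights have already been swizzled to [ M, I, H, W ], so the kernel
        // height and width are read from indices 2 and 3 respectively.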
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}

} // namespace armnn_driver