//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include "Utils.hpp"

#include <armnn/ArmNN.hpp>

#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
#include "armnn/src/armnnUtils/Permute.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <boost/assert.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>

#include <log/log.h>
#include <vector>

namespace armnn_driver
{

///
/// Helper classes
///

struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    const std::vector<armnn::BackendId>       m_Backends;
    armnn::INetworkPtr                        m_Network;
    std::vector<armnn::IOutputSlot*>          m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool                m_Valid;
    armnn::TensorInfo   m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors).
    // The optional flag can be set to indicate that the tensor values were missing but the pin is
    // otherwise valid.
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of the tensor data. Belongs to one of the memory pools associated
    //                   with the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // Optional flag to indicate that an invalid tensor pin is not an error, but that the optional
    // values were simply not given.
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return).
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
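
// Illustrative only (not part of the original header): because Fail() always returns false,
// a converter can log and bail out in a single statement, e.g.
//     if (inputIndex >= operation.inputs.size())
//     {
//         return Fail("%s: invalid input index: %i", __func__, inputIndex);
//     }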

// Convenience function to call an Is*Supported function and log the caller name together with the
// reason for lack of support.
// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
template<typename IsLayerSupportedFunc, typename ... Args>
bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
{
    std::vector<char> unsupportedReason(1024 + 1);
    bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size() - 1);
    if (isSupported)
    {
        return true;
    }

    std::string sUnsupportedReason(unsupportedReason.data());
    if (!sUnsupportedReason.empty())
    {
        ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
    }
    else
    {
        ALOGD("%s: not supported by armnn", funcName);
    }
    return false;
}

template<typename IsLayerSupportedFunc, typename ... Args>
bool IsLayerSupportedForAnyBackend(const char* funcName,
                                   IsLayerSupportedFunc f,
                                   const std::vector<armnn::BackendId>& backends,
                                   Args&&... args)
{
    for (auto&& backend : backends)
    {
        if (IsLayerSupported(funcName, f, backend, std::forward<Args>(args)...))
        {
            return true;
        }
    }

    ALOGD("%s: not supported by any specified backend", funcName);
    return false;
}
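
// Illustrative only: the converters below gate layer creation on backend support with this
// helper, e.g. (taken from ConvertToActivation further down):
//     if (!IsLayerSupportedForAnyBackend(__func__,
//                                        armnn::IsActivationSupported,
//                                        data.m_Backends,
//                                        input.GetTensorInfo(),
//                                        outInfo,
//                                        activationDesc))
//     {
//         return false;
//     }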

template<typename Operand>
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32 ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#ifdef ARMNN_ANDROID_NN_V1_2

inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

#ifdef ARMNN_ANDROID_NN_V1_2

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

#endif

template<typename LayerHandleType>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    BOOST_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer, armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return;
    }

    // Since the numbers of dimensions do not match, we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference     = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                               boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |      |
        //  Reshape   |
        //      \    /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |      |
        //     |   Reshape
        //      \    /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }
}
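
// Worked example for BroadcastTensor (assumed shapes): given input0 of shape [1, 2, 3, 4] and
// input1 of shape [3, 4], input1 is reshaped to [1, 1, 3, 4] (degenerate leading dimensions)
// before both tensors are connected to startLayer, so the backend sees operands of equal rank.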

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
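
// Worked example (assuming the Android NN SAME padding rule implemented by
// calculateExplicitPadding): input = 224, kernel = 3, stride = 2 gives an output extent of
// ceil(224 / 2) = 112, so the total padding required is (112 - 1) * 2 + 3 - 224 = 1, split as
// outPadHead = 0 and outPadTail = 1. With the VALID scheme both outputs are 0.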

Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type       = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale      = operand.scale;
    shape.offset     = operand.zeroPoint;
    return shape;
}

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is
// also what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly
// match, so we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as
// it is up to the user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}
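
// Worked example (assumed scales): with an input scale of 0.5f and a weight scale of 0.25f the
// expected bias scale is 0.125f. A bias scale of 0.12501f is within the 1% tolerance and is
// snapped to 0.125f with a warning, while a bias scale of 0.2f is left untouched (ArmNN itself
// will reject it later).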

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
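
// Note on the convention assumed here (as consumed by armnnUtils::Permuted): entry i of a
// PermutationVector is the destination index of source dimension i, i.e.
// outputShape[mappings[i]] = inputShape[i]. For example, NHWCToArmNN maps an NHWC shape
// [1, 224, 224, 3] to the NCHW shape [1, 3, 224, 224].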

template<typename OSlot>
armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
                                          const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);

    BOOST_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}

armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}

// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}

// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape>& inputShapes,
                               const armnn::TensorShape& outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}
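
// Worked example (assumed shapes): concatenating [1, 2, 8] and [1, 3, 8] along concatDim = 1
// must yield exactly [1, 5, 8]; the concatenated extent is the sum 2 + 3, and every other
// dimension must match the first input shape.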

bool RequiresReshape(armnn::TensorShape& inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // Add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // Replace the inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t& concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutationPair)
{
    bool needPermute = false;
    BOOST_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation.
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute     = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute     = true;
    }
    return needPermute;
}
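
// Illustrative sketch: a 4-D concatenation along dimension 2 is rewritten as "swap dimensions
// 1 and 2 on every input, concatenate along dimension 1, then swap back on the output".
// SwapDim1And2 is its own inverse, which is why both halves of the returned permutationPair
// are identical in that case.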

} // anonymous namespace

namespace armnn_driver
{

/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
///         activation function or nullptr if an error occurred (e.g. unsupported activation).
///         Note that the end layer matches the input layer if no activation is required
///         (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand   = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error, so it should not register as a failure
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get the value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // An optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // Mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}
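
// Illustrative only (mirrors ConvertConv2d further down): constant weights and bias are
// typically pinned straight from the operation inputs, e.g.
//     const ConstTensorPin weightsPin =
//         ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
//     if (!weightsPin.IsValid())
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }
// The optional dimensionMappings/overrideTensorShape arguments cover cases where the
// AndroidNN weight layout has to be reinterpreted for ArmNN.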

template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalModel       = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    outValue = *(static_cast<const OutputType*>(valueAddress));
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation   = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel       = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalPolicy,
         typename ConvolutionDescriptor,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationX,
                                             model,
                                             data);
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex + 1,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationY,
                                             model,
                                             data);
    }

    return success;
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel   = typename HalPolicy::Model>
bool GetTensorInt32Values(const HalOperand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__);
    }

    // Check the number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_INPUT:
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            }
            case HalOperandLifeTime::CONSTANT_COPY:
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    if (!IsLayerSupportedForAnyBackend(__func__,
                                                       armnn::IsConstantSupported,
                                                       data.m_Backends,
                                                       tensorPin.GetConstTensor().GetInfo()))
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (layerOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
    data.m_OutputSlotForOperand[operandIndex] = &outputSlot;

    outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));

    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
                                     uint32_t inputIndex,
                                     const HalModel& model,
                                     ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return armnn::DataLayout::NHWC;
    }

    if (!IsBool(*operand))
    {
        return armnn::DataLayout::NHWC;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return armnn::DataLayout::NHWC;
    }

    if (*(static_cast<const bool*>(valueAddress)))
    {
        return armnn::DataLayout::NCHW;
    }
    else
    {
        return armnn::DataLayout::NHWC;
    }
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data)
{
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, outputIndex, layer, outputIndex, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsActivationSupported,
                                       data.m_Backends,
                                       input.GetTensorInfo(),
                                       outInfo,
                                       activationDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertPaddings(const HalOperation& operation,
                     const HalModel& model,
                     ConversionData& data,
                     unsigned int rank,
                     armnn::PadDescriptor& padDescriptor)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    if (!GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data))
    {
        return Fail("%s: Operation has invalid or inaccessible paddings tensor", __func__);
    }

    // Add padding for each dimension of the input tensor.
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput  = paddings[i + 1];

        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType            = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout          = armnn::DataLayout::NHWC;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // One input, 6 parameters (padding scheme, stride x, stride y, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        const unsigned int inputWidth  = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // One input, 9 parameters (padding left/right/top/bottom, stride x, stride y, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPooling2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1280
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001281template<typename HalPolicy,
1282 typename HalOperation = typename HalPolicy::Operation,
1283 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001284bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
1285{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001286 using HalOperand = typename HalPolicy::Operand;
1287 using HalOperandType = typename HalPolicy::OperandType;
1288
1289 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001290 if (!input.IsValid())
1291 {
1292 return Fail("%s: Operation has invalid inputs", __func__);
1293 }
1294
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001295 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001296 if (!output)
1297 {
1298 return Fail("%s: Could not read output 0", __func__);
1299 }
1300
1301 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1302 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1303
1304 // ArmNN does not currently support non-fixed weights or bias
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001305 const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
1306 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001307
1308 if (!weightsPin.IsValid() || !biasPin.IsValid())
1309 {
1310 return Fail("%s: Operation has invalid inputs", __func__);
1311 }
1312
1313 armnn::ConstTensor weights = weightsPin.GetConstTensor();
1314 armnn::ConstTensor bias = biasPin.GetConstTensor();
1315 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
1316
1317 armnn::Convolution2dDescriptor desc;
1318 desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() >= 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
    }
    else if (operation.inputs.size() >= 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];
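        // AndroidNN CONV_2D weights are laid out as [ depth_out, H, W, depth_in ],
        // and this implicit-padding path assumes an NHWC input, hence the fixed
        // indices above.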

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
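        // For the SAME scheme the runtime convention is
        //     totalPad = max(0, (ceil(inputSize / stride) - 1) * stride + kernelSize - inputSize)
        // with any odd remainder going to the right/bottom; VALID applies no padding.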

        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConvolution2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-constant weights or bias.

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Look ahead to find the optional DataLayout, if present
    if (operation.inputs.size() >= 12)
    {
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 11, model, data);
    }
    else if (operation.inputs.size() >= 9)
    {
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 8, model, data);
    }

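    // Resolve which dimensions hold channels, width and height for the chosen layout;
    // the channels index drives the weight reshape below, and the width/height indices
    // are used when calculating implicit padding.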
    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex    = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex   = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });
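    // Example with hypothetical values: weights of [ 1, 3, 3, 32 ] over a 16-channel
    // input give a depth multiplier M = 32 / 16 = 2, i.e. a shape of [ 3, 3, 16, 2 ].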

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
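    // Each entry gives the destination position of the corresponding source dimension:
    // H (0) -> 2, W (1) -> 3, I (2) -> 1, M (3) -> 0, which yields [ M, I, H, W ].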

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() >= 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() >= 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX  = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY  = inputInfo.GetShape()[heightIndex];
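        // The weights were swizzled to [ M, I, H, W ] above, so dimension 3 holds the
        // kernel width and dimension 2 the kernel height.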

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}

} // namespace armnn_driver