blob: c59da1d5fda67773306c10b07b561d09a1b96187 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01008#include "Utils.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010#include <armnn/ArmNN.hpp>
11
Mike Kellyb5fdf382019-06-11 16:35:25 +010012#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010013#include "armnn/src/armnnUtils/Permute.hpp"
arovir01b0717b52018-09-05 17:03:25 +010014
15#include <ActivationFunctor.h>
16#include <CpuExecutor.h>
17#include <OperationsUtils.h>
18
19#include <boost/assert.hpp>
20#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010021#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010022#include <boost/test/tools/floating_point_comparison.hpp>
23
24#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010025#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010026
27namespace armnn_driver
28{
29
30///
31/// Helper classes
32///
33
// State shared across the conversion of one model: the candidate backends,
// the ArmNN network being built, and per-operand bookkeeping.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr) // INetworkPtr takes a deleter; start with an empty network
    {}

    // Backends the converted network may run on.
    const std::vector<armnn::BackendId> m_Backends;
    // The ArmNN network under construction.
    armnn::INetworkPtr m_Network;
    // Presumably indexed by Android NN operand index, holding the slot that
    // produces each operand's value — TODO confirm against the converter code.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operand data; must stay alive
    // while operand addresses obtained from them are in use.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
46
// Wraps an (optionally valid) ArmNN output slot plus its tensor info, so a
// converted operand can later be connected as the input of another layer.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
64
// Holds a ConstTensor for a constant model operand, optionally owning a
// swizzled copy of the data. Move-only: copying is deleted so the owned
// buffer (and the ConstTensor pointing into it) is never duplicated.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation applied to the tensor data (see m_SwizzledTensorData).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
99
100} // namespace armnn_driver
101
102///
103/// Utility functions
104///
105
106namespace
107{
108
109using namespace armnn_driver;
110using namespace android::nn;
111
// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    // Debug level: conversion failures are expected and handled by callers.
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
120
121// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
122// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
123template<typename IsLayerSupportedFunc, typename ... Args>
124bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
125{
126 std::vector<char> unsupportedReason(1024+1);
127 bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size()-1);
128 if(isSupported)
129 {
130 return true;
131 }
132 else
133 {
134 std::string sUnsupportedReason(unsupportedReason.data());
135 if (sUnsupportedReason.size() > 0)
136 {
137 ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
138 } else
139 {
140 ALOGD("%s: not supported by armnn", funcName);
141 }
142 return false;
143 }
144}
145
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100146template<typename IsLayerSupportedFunc, typename ... Args>
147bool IsLayerSupportedForAnyBackend(const char* funcName,
148 IsLayerSupportedFunc f,
149 const std::vector<armnn::BackendId>& backends,
150 Args&&... args)
151{
152 for (auto&& backend : backends)
153 {
154 if (IsLayerSupported(funcName, f, backend, std::forward<Args>(args)...))
155 {
156 return true;
157 }
158 }
159
160 ALOGD("%s: not supported by any specified backend", funcName);
161 return false;
162}
163
Mike Kellyb5fdf382019-06-11 16:35:25 +0100164template<typename Operand>
165armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100166{
167 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
168}
169
Matthew Bentham912b3622019-05-03 15:49:14 +0100170inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100171{
Matthew Bentham912b3622019-05-03 15:49:14 +0100172 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
173 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
174 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100175}
176
#ifdef ARMNN_ANDROID_NN_V1_2

// True for the V1_2 operand types that can be represented as ArmNN tensors.
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
190
// V1_0 has no BOOL operand type, so a V1_0 operand is never a bool.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
195
#ifdef ARMNN_ANDROID_NN_V1_2

// Checks whether a V1_2 operand carries a BOOL value.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

#endif
204
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100205template<typename LayerHandleType>
206armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
207 armnn::TensorInfo reshapeInfo)
208{
209 armnn::ReshapeDescriptor reshapeDescriptor;
210 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
211
212 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
213 BOOST_ASSERT(reshapeLayer != nullptr);
214
215 // Attach the input layer to the reshape layer
216 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
217 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
218
219 return *reshapeLayer;
220}
221
222void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
223 armnn::IConnectableLayer* startLayer, armnn::INetwork& network)
arovir01b0717b52018-09-05 17:03:25 +0100224{
225 BOOST_ASSERT(startLayer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +0100226
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100227 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
228 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
229
230 unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
231 unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
232
233 if (inputDimensions0 == inputDimensions1)
arovir01b0717b52018-09-05 17:03:25 +0100234 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100235 // The inputs have the same number of dimensions, simply connect them to the given layer as they are
236 input0.Connect(startLayer->GetInputSlot(0));
237 input1.Connect(startLayer->GetInputSlot(1));
238
239 return;
240 }
241
242 // Since the number of dimensions do not match then we need to add degenerate dimensions
243 // to the "smaller" tensor using a reshape, while keeping the order of the inputs.
244
245 unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
246 unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
247 boost::numeric_cast<int>(inputDimensions1));
248
249 bool input0IsSmaller = inputDimensions0 < inputDimensions1;
250 LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
251 const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();
252
253 const armnn::TensorShape& smallShape = smallInfo.GetShape();
254 std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
255 for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
256 {
257 reshapedDimensions[i] = smallShape[i - sizeDifference];
258 }
259
260 armnn::TensorInfo reshapedInfo = smallInfo;
261 reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
262 reshapedDimensions.data() });
263 armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(network, smallInputHandle, reshapedInfo);
264
265 if (input0IsSmaller)
266 {
267 // Input0 is the "smaller" tensor, connect the reshape layer as follows:
268 //
269 // Input0 Input1
arovir01b0717b52018-09-05 17:03:25 +0100270 // | |
271 // Reshape |
272 // \ /
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100273 // StartLayer
arovir01b0717b52018-09-05 17:03:25 +0100274
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100275 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
276 input1.Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100277 }
278 else
279 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100280 // Input1 is the "smaller" tensor, connect the reshape layer as follows:
281 //
282 // Input0 Input1
283 // | |
284 // | Reshape
285 // \ /
286 // StartLayer
287
arovir01b0717b52018-09-05 17:03:25 +0100288 input0.Connect(startLayer->GetInputSlot(0));
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100289 reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
arovir01b0717b52018-09-05 17:03:25 +0100290 }
291}
292
293void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
294 android::nn::PaddingScheme scheme)
295{
296 int32_t padHead;
297 int32_t padTail;
298 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
299 outPadHead = boost::numeric_cast<uint32_t>(padHead);
300 outPadTail = boost::numeric_cast<uint32_t>(padTail);
301}
302
#ifdef ARMNN_ANDROID_NN_V1_2

// V1_2 overload: also accounts for kernel dilation when computing padding.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
                 uint32_t& outPadTail, android::nn::PaddingScheme scheme)
{
    int32_t head = 0;
    int32_t tail = 0;
    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &head, &tail);
    // numeric_cast throws if the framework ever reports negative padding.
    outPadHead = boost::numeric_cast<uint32_t>(head);
    outPadTail = boost::numeric_cast<uint32_t>(tail);
}

#endif
316
// Converts a V1_0 operand description into the NN framework's Shape struct
// (used by the Android NN shape-validation helpers).
Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    // The framework calls the quantization zero point "offset" here.
    shape.offset = operand.zeroPoint;
    return shape;
}
326
327// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
328// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
329// we accept some tolerance. We don't want to ArmNN itself to accept these inconsistencies as it is up to the user
330// (us, in this case) to ensure they match.
331void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
332 const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
333{
334 const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
335 if (biasInfo.GetQuantizationScale() != expectedBiasScale)
336 {
337 boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
338 if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
339 {
340 ALOGW("Bias quantization scale has been modified to match input*weights");
341 biasInfo.SetQuantizationScale(expectedBiasScale);
342 }
343 }
344}
345
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
// Swaps dimensions 1 and 2 of a 4D tensor (used for concat along dim 2).
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
// Cyclic rotations of a 3D tensor's dimensions; RotateTensorLeft and
// RotateTensorRight are inverses of each other.
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
356
357template<typename OSlot>
358armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
359 const armnn::PermutationVector& mappings)
360{
361 // Add swizzle layer
362 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
363
364 BOOST_ASSERT(layer != nullptr);
365
366 // Connect input to swizzle layer
367 input.Connect(layer->GetInputSlot(0));
368
369 // Setup swizzled output
370 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
371 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
372
373 return *layer;
374}
375
376void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
377{
378 // Add swizzle layer
379 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
380 // Connect swizzled input to layer
381 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
382}
383
384armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
385{
386 // Add deswizzle layer
387 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
388 return deswizzleLayer;
389}
390
391// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
392armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
393 LayerInputHandle& input,
394 armnn::IConnectableLayer& firstLayer,
395 armnn::IConnectableLayer& lastLayer)
396{
397 SwizzleIn(network, input, firstLayer, 0);
398 return DeswizzleOut(network, lastLayer, 0);
399}
400
401// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
402armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
403 armnn::IConnectableLayer& layer)
404{
405 return SwizzleInDeswizzleOut(network, input, layer, layer);
406}
407
408bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
409 const armnn::TensorShape & outputShape,
410 uint32_t concatDim)
411{
412 // Validate the output shape is correct given the input shapes (which have just been validated)
413 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
414 if (outputShape.GetNumDimensions() != numDimensions)
415 {
416 return Fail("%s: Output shape has wrong number of dimensions", __func__);
417 }
418
419 unsigned int outputSizeAlongConcatenatedDimension = 0;
420 for (unsigned int i = 0; i < inputShapes.size(); i++)
421 {
422 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
423 }
424
425 for (unsigned int i = 0; i < numDimensions; ++i)
426 {
427 if (i == concatDim)
428 {
429 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
430 {
431 return Fail(
432 "%s: Invalid output shape for dimension %d (%d != %d)",
433 __func__,
434 i,
435 outputShape[i],
436 outputSizeAlongConcatenatedDimension);
437 }
438 }
439 else
440 {
441 if (outputShape[i] != inputShapes[0][i])
442 {
443 return Fail("%s: Invalid output shape", __func__);
444 }
445 }
446 }
447
448 return true;
449}
450
// Tensors with fewer than 3 dimensions need a reshape before the 3D/4D
// concat permutation logic below can be applied.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
455
arovir01b0717b52018-09-05 17:03:25 +0100456void SwizzleInputs(armnn::INetwork& network,
457 std::vector<LayerInputHandle>& inputs,
458 std::vector<armnn::TensorShape>& inputShapes,
459 const armnn::PermutationVector& mapping)
460{
461 if (!mapping.IsEqual(IdentityPermutation4D))
462 {
463 size_t nInputs = inputs.size();
464 for (size_t i=0; i<nInputs; ++i)
465 {
466 // add swizzle layer
467 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
468 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
469 auto& outputInfo = outputSlot.GetTensorInfo();
470 // replace inputs with the swizzled ones
471 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
472 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
473 }
474 }
475}
476
narpra01f176d5a2018-11-18 20:17:48 +0000477bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
478 int32_t & concatDimension,
479 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100480{
narpra01f176d5a2018-11-18 20:17:48 +0000481 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100482 BOOST_ASSERT(numberOfDimensions >= 3);
483
484 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000485 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
486 // or along dimension 0 or 2 for a 3-D tensor.
487 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100488 {
narpra01f176d5a2018-11-18 20:17:48 +0000489 concatDimension = 1;
490 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
491 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100492 }
narpra01f176d5a2018-11-18 20:17:48 +0000493 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100494 {
narpra01f176d5a2018-11-18 20:17:48 +0000495 concatDimension = 0;
496 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
497 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100498 }
narpra01f176d5a2018-11-18 20:17:48 +0000499 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100500}
501
502} // anonymous namespace
503
504namespace armnn_driver
505{
506
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
517
518} // namespace armnn_driver
519
520///
521/// Utility templates
522///
523
524namespace armnn_driver
525{
526
527using namespace android::nn;
528
// Looks up the operand referenced by input slot `inputIndex` of `operation`.
// @return Pointer into model.operands, or nullptr if the index is out of range
//         (logging a failure unless failOnIndexOutOfBounds is false).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    const uint32_t operandIndex = operation.inputs[inputIndex];
    BOOST_ASSERT(operandIndex < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operandIndex];
}
550
// Looks up the operand referenced by output slot `outputIndex` of `operation`.
// @return Pointer into model.operands, or nullptr (with a logged failure) if
//         the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    const uint32_t operandIndex = operation.outputs[outputIndex];
    BOOST_ASSERT(operandIndex < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operandIndex];
}
570
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100571template<typename HalPolicy,
572 typename HalOperand = typename HalPolicy::Operand,
573 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100574const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
Matthew Bentham912b3622019-05-03 15:49:14 +0100575 const HalModel& model,
576 const ConversionData& data,
Kevin Mayf29a2c52019-03-14 11:56:32 +0000577 bool optional = false)
arovir01b0717b52018-09-05 17:03:25 +0100578{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100579 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
arovir01b0717b52018-09-05 17:03:25 +0100580
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100581 const void* valueStart = nullptr;
arovir01b0717b52018-09-05 17:03:25 +0100582 switch (operand.lifetime)
583 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100584 case HalOperandLifeTime::CONSTANT_COPY:
arovir01b0717b52018-09-05 17:03:25 +0100585 {
586 // Constant found in model.operandValues
587 valueStart = &model.operandValues[operand.location.offset];
588 break;
589 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100590 case HalOperandLifeTime::CONSTANT_REFERENCE:
arovir01b0717b52018-09-05 17:03:25 +0100591 {
592 // Constant specified via a Memory object
593 valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
594 break;
595 }
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100596 case HalOperandLifeTime::NO_VALUE:
Kevin Mayf29a2c52019-03-14 11:56:32 +0000597 {
598 // An optional input tensor with no values is not an error so should not register as a fail
599 if (optional)
600 {
601 valueStart = nullptr;
602 break;
603 }
Matthew Bentham912b3622019-05-03 15:49:14 +0100604 [[fallthrough]];
Kevin Mayf29a2c52019-03-14 11:56:32 +0000605 }
arovir01b0717b52018-09-05 17:03:25 +0100606 default:
607 {
608 // Unsupported/invalid (e.g. can't get value of an input to the model)
609 Fail("%s: unsupported/invalid operand lifetime: %s",
610 __func__, toString(operand.lifetime).c_str());
611 valueStart = nullptr;
612 }
613 }
614
615 return valueStart;
616}
617
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100618template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100619 typename HalOperation = typename HalPolicy::Operation,
620 typename HalModel = typename HalPolicy::Model,
621 typename HalOperandType = typename HalPolicy::OperandType>
622bool GetOperandType(const HalOperation& operation,
623 uint32_t inputIndex,
624 const HalModel& model,
625 HalOperandType& type)
626{
627 using HalOperand = typename HalPolicy::Operand;
628
629 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
630 if (!operand)
631 {
632 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
633 }
634
635 type = operand->type;
636 return true;
637}
638
639template<typename HalPolicy,
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100640 typename HalOperand = typename HalPolicy::Operand,
641 typename HalModel = typename HalPolicy::Model>
642ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
643 const HalModel& model,
644 const ConversionData& data,
645 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
646 const armnn::TensorShape* overrideTensorShape = nullptr,
647 bool optional = false)
648{
649 using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;
650
651 if (!IsOperandTypeSupportedForTensors(operand.type))
652 {
653 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
654 return ConstTensorPin();
655 }
656
657 if (!optional &&
658 operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
659 operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
660 operand.lifetime != HalOperandLifeTime::NO_VALUE)
661 {
662 Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
663 return ConstTensorPin();
664 }
665
666 const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
667 if (!valueStart)
668 {
669 if (optional)
670 {
671 // optional tensor with no values is not really an error; return it as invalid, but marked as optional
672 return ConstTensorPin(true);
673 }
674 // mandatory tensor with no values
675 Fail("%s: failed to get operand address", __func__);
676 return ConstTensorPin();
677 }
678
679 armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
680 if (overrideTensorShape != nullptr)
681 {
682 tensorInfo.SetShape(*overrideTensorShape);
683 }
684 return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
685}
686
687template<typename HalPolicy,
688 typename HalOperation = typename HalPolicy::Operation,
689 typename HalModel = typename HalPolicy::Model>
690ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
691 uint32_t inputIndex,
692 const HalModel& model,
693 const ConversionData& data,
694 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
695 const armnn::TensorShape* overrideTensorShape = nullptr,
696 bool optional = false)
697{
698 using HalOperand = typename HalPolicy::Operand;
699
700 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
701 if (!operand)
702 {
703 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
704 return ConstTensorPin();
705 }
706 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
707 model,
708 data,
709 dimensionMappings,
710 overrideTensorShape,
711 optional);
712}
713
714template<typename HalPolicy,
715 typename OutputType,
716 typename HalOperandType = typename HalPolicy::OperandType,
717 typename HalOperation = typename HalPolicy::Operation,
718 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100719bool GetInputScalar(const HalOperation& operation,
720 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100721 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100722 OutputType& outValue,
723 const HalModel& model,
724 const ConversionData& data)
725{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100726 using HalOperand = typename HalPolicy::Operand;
727
728 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100729 if (!operand)
730 {
731 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
732 }
733
734 if (operand->type != type)
735 {
736 return Fail("%s: unexpected operand type: %s (should be %s)",
737 __func__, toString(operand->type).c_str(), toString(type).c_str());
738 }
739
740 if (operand->location.length != sizeof(OutputType))
741 {
742 return Fail("%s: incorrect operand location length: %i (should be %i)",
743 __func__, operand->location.length, sizeof(OutputType));
744 }
745
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100746 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100747 if (!valueAddress)
748 {
749 return Fail("%s: failed to get address for operand", __func__);
750 }
751
752 outValue = *(static_cast<const OutputType*>(valueAddress));
753 return true;
754}
755
// Reads an INT32 scalar from input `inputIndex` into `outValue`.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}
767
// Reads a FLOAT32 scalar from input `inputIndex` into `outValue`.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}
779
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100780template<typename HalPolicy,
781 typename HalOperation = typename HalPolicy::Operation,
782 typename HalOperandType = typename HalPolicy::OperandType,
783 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100784bool GetInputActivationFunctionImpl(const HalOperation& operation,
785 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100786 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100787 ActivationFn& outActivationFunction,
788 const HalModel& model,
789 const ConversionData& data)
790{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100791 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100792 {
793 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
794 __func__,
795 toString(type).c_str(),
796 toString(OperandType::INT32).c_str(),
797 toString(OperandType::TENSOR_INT32).c_str());
798 }
799
800 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100801 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100802 {
803 return Fail("%s: failed to get activation input value", __func__);
804 }
805 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
806 return true;
807}
808
// Reads the activation function from an INT32 scalar input.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
825
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100826template<typename HalPolicy,
827 typename HalOperation = typename HalPolicy::Operation,
828 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100829bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
830 uint32_t inputIndex,
831 ActivationFn& outActivationFunction,
832 const HalModel& model,
833 const ConversionData& data)
834{
835 // This only accepts a 1-D tensor of size 1
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100836 return GetInputActivationFunctionImpl<HalPolicy>(operation,
837 inputIndex,
838 HalPolicy::OperandType::INT32,
839 outActivationFunction,
840 model,
841 data);
arovir01b0717b52018-09-05 17:03:25 +0100842}
843
844
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100845template<typename HalPolicy,
846 typename HalOperation = typename HalPolicy::Operation,
847 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100848bool GetOptionalInputActivation(const HalOperation& operation,
849 uint32_t inputIndex,
850 ActivationFn& activationFunction,
851 const HalModel& model,
852 const ConversionData& data)
853{
854 if (operation.inputs.size() <= inputIndex)
855 {
856 activationFunction = ActivationFn::kActivationNone;
857 }
858 else
859 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100860 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100861 {
862 return Fail("%s: Operation has invalid inputs", __func__);
863 }
864 }
865 return true;
866}
867
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100868template<typename HalPolicy,
869 typename ConvolutionDescriptor,
870 typename HalOperation = typename HalPolicy::Operation,
871 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100872bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
873 uint32_t dilationXIndex,
874 ConvolutionDescriptor& descriptor,
875 const HalModel& model,
876 const ConversionData& data)
877{
878 bool success = true;
879 if (operation.inputs.size() >= dilationXIndex + 2)
880 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100881 success &= GetInputScalar<HalPolicy>(operation,
882 dilationXIndex,
883 HalPolicy::OperandType::INT32,
884 descriptor.m_DilationX,
885 model,
886 data);
887 success &= GetInputScalar<HalPolicy>(operation,
888 dilationXIndex + 1,
889 HalPolicy::OperandType::INT32,
890 descriptor.m_DilationY,
891 model,
892 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100893 }
894
895 return success;
896}
897
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100898template<typename HalPolicy,
899 typename HalOperand = typename HalPolicy::Operand,
900 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100901bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100902 std::vector<int32_t>& outValues,
903 const HalModel& model,
904 const ConversionData& data)
905{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100906 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100907 {
908 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
909 }
910
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100911 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100912 if (!startAddress)
913 {
914 return Fail("%s: failed to get operand address", __func__, operand.type);
915 }
916
917 // Check number of bytes is sensible
918 const uint32_t numBytes = operand.location.length;
919 if (numBytes % sizeof(int32_t) != 0)
920 {
921 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
922 __func__, numBytes, sizeof(int32_t));
923 }
924
925 outValues.resize(numBytes / sizeof(int32_t));
926 memcpy(outValues.data(), startAddress, numBytes);
927 return true;
928}
929
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100930template<typename HalPolicy,
931 typename HalOperation = typename HalPolicy::Operation,
932 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100933bool GetInputPaddingScheme(const HalOperation& operation,
934 uint32_t inputIndex,
935 PaddingScheme& outPaddingScheme,
936 const HalModel& model,
937 const ConversionData& data)
938{
939 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100940 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100941 {
942 return Fail("%s: failed to get padding scheme input value", __func__);
943 }
944
945 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
946 return true;
947}
948
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Resolves input operand `inputIndex` of `operation` to a LayerInputHandle wrapping an
// ArmNN output slot. Temporary/model-IO operands are looked up in the slot map built so
// far; constant operands are materialized as an ArmNN Constant layer. On any failure a
// message is logged via Fail() and an invalid (default) LayerInputHandle is returned.
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand can throw UnsupportedOperand; handled by the catch below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_INPUT:
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
                break;
            }
            case HalOperandLifeTime::CONSTANT_COPY:
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // Bail out early if no backend can host a Constant layer of this shape/type.
                    if (!IsLayerSupportedForAnyBackend(__func__,
                                                      armnn::IsConstantSupported,
                                                      data.m_Backends,
                                                      tensorPin.GetConstTensor().GetInfo()))
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1036
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001037template<typename HalPolicy,
1038 typename HalOperation = typename HalPolicy::Operation,
1039 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001040bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1041 uint32_t operationOutputIndex,
1042 armnn::IConnectableLayer& layer,
1043 uint32_t layerOutputIndex,
1044 const HalModel& model,
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001045 ConversionData& data,
1046 const armnn::Optional<armnn::TensorInfo>& outputInfo = armnn::EmptyOptional())
Mike Kellyb5fdf382019-06-11 16:35:25 +01001047{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001048 using HalOperand = typename HalPolicy::Operand;
1049
1050 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001051 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1052 {
1053 return false;
1054 }
1055
1056 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1057
1058 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1059 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1060
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001061 if (outputInfo.has_value())
1062 {
1063 outputSlot.SetTensorInfo(outputInfo.value());
1064 ALOGD("Output info overwritten");
1065 }
1066 else
1067 {
1068 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
1069 }
Mike Kellyb5fdf382019-06-11 16:35:25 +01001070
1071 return true;
1072}
1073
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001074template<typename HalPolicy,
1075 typename HalOperation = typename HalPolicy::Operation,
1076 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001077armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1078 uint32_t inputIndex,
1079 const HalModel& model,
1080 ConversionData& data)
1081{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001082 using HalOperand = typename HalPolicy::Operand;
1083
1084 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001085 if (!operand)
1086 {
1087 return armnn::DataLayout::NHWC;
1088 }
1089
1090 if (!IsBool(*operand))
1091 {
1092 return armnn::DataLayout::NHWC;
1093 }
1094
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001095 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001096 if (!valueAddress)
1097 {
1098 return armnn::DataLayout::NHWC;
1099 }
1100
1101 if (*(static_cast<const bool*>(valueAddress)))
1102 {
1103 return armnn::DataLayout::NCHW;
1104 }
1105 else
1106 {
1107 return armnn::DataLayout::NHWC;
1108 }
1109}
1110
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001111template<typename HalPolicy,
1112 typename HalOperation = typename HalPolicy::Operation,
1113 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001114bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1115 uint32_t outputIndex,
1116 armnn::IConnectableLayer& layer,
1117 const HalModel& model,
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001118 ConversionData& data,
1119 const armnn::Optional<armnn::TensorInfo>& outputInfo = armnn::EmptyOptional())
Mike Kellyb5fdf382019-06-11 16:35:25 +01001120{
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001121 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation,
1122 outputIndex,
1123 layer,
1124 outputIndex,
1125 model,
1126 data,
1127 outputInfo);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001128}
1129
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001130template<typename HalPolicy,
1131 typename HalOperation = typename HalPolicy::Operation,
1132 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001133bool ConvertToActivation(const HalOperation& operation,
1134 const char* operationName,
1135 const armnn::ActivationDescriptor& activationDesc,
1136 const HalModel& model,
1137 ConversionData& data)
1138{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001139 using HalOperand = typename HalPolicy::Operand;
1140
1141 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001142 if (!input.IsValid())
1143 {
1144 return Fail("%s: Input 0 is invalid", operationName);
1145 }
1146
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001147 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001148 if (!outputOperand)
1149 {
1150 return false;
1151 }
1152 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001153 if (!IsLayerSupportedForAnyBackend(__func__,
1154 armnn::IsActivationSupported,
1155 data.m_Backends,
1156 input.GetTensorInfo(),
1157 outInfo,
1158 activationDesc))
arovir01b0717b52018-09-05 17:03:25 +01001159 {
1160 return false;
1161 }
1162
1163 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1164 BOOST_ASSERT(layer != nullptr);
1165 input.Connect(layer->GetInputSlot(0));
1166
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001167 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001168}
1169
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001170template<typename HalPolicy,
1171 typename HalOperation = typename HalPolicy::Operation,
1172 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001173bool ConvertPaddings(const HalOperation& operation,
1174 const HalModel& model,
1175 ConversionData& data,
1176 unsigned int rank,
1177 armnn::PadDescriptor& padDescriptor)
1178{
1179 using HalOperand = typename HalPolicy::Operand;
1180
1181 const HalOperand* paddingsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
1182 if (!paddingsOperand)
1183 {
1184 return Fail("%s: Could not read paddings operand", __func__);
1185 }
1186
1187 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
1188 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
1189 {
1190 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
1191 }
1192
1193 std::vector<int32_t> paddings;
1194 GetTensorInt32Values<HalPolicy>(*paddingsOperand, paddings, model, data);
1195
1196 // add padding for each dimension of input tensor.
1197 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
1198 {
1199 int paddingBeforeInput = paddings[i];
1200 int paddingAfterInput = paddings[i + 1];
1201
1202 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
1203 {
1204 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
1205 }
1206
1207 padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
1208 }
1209
1210 return true;
1211}
1212
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts a HAL 2D pooling operation (AVERAGE/MAX/L2, selected by poolType) into an
// ArmNN Pooling2d layer followed by any fused activation. Supports both HAL input
// layouts: 7 inputs (implicit padding scheme) or 10 inputs (explicit padding values).
// Returns false (logging where a message helps) on invalid inputs or unsupported layers.
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    // Layout is fixed to NHWC here; the shape indexing below ([1]=H, [2]=W) relies on it.
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // NHWC indexing: dimension 1 is height, dimension 2 is width.
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        // Derive explicit padding values from the implicit scheme (SAME/VALID).
        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        // NOTE(review): this branch reads inputs 1..9 and is taken for any size != 7 —
        // presumably callers guarantee at least 10 inputs here; confirm upstream.
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPooling2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (may return the pooling layer itself for kActivationNone).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1310
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts a HAL CONV_2D operation into an ArmNN Convolution2d layer plus any fused
// activation. Handles both HAL signatures: explicit padding (>= 10 inputs) and
// implicit padding scheme (>= 7 inputs), each with optional data-layout and dilation
// operands. Weights and bias must be constant operands.
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input/weight scales before validation.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() >= 10)
    {
        // Explicit-padding signature: pads (3-6), strides (7-8), activation (9),
        // optional layout (10) and dilations (11-12).
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
    }
    else if (operation.inputs.size() >= 7)
    {
        // Implicit-padding signature: scheme (3), strides (4-5), activation (6),
        // optional layout (7) and dilations (8-9).
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // NOTE(review): kernel/input sizes are indexed assuming NHWC-style shapes here,
        // before the optional layout operand is read below — confirm upstream intent.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConvolution2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (may return the convolution layer itself for kActivationNone).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1427
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001428template<typename HalPolicy,
1429 typename HalOperation = typename HalPolicy::Operation,
1430 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001431bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
1432{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001433 using HalOperand = typename HalPolicy::Operand;
1434 using HalOperandType = typename HalPolicy::OperandType;
1435
1436 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001437
1438 if (!input.IsValid())
1439 {
1440 return Fail("%s: Operation has invalid inputs", __func__);
1441 }
1442
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001443 const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001444
1445 if (!output)
1446 {
1447 return Fail("%s: Could not read output 0", __func__);
1448 }
1449
1450 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1451 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1452
1453 // ArmNN does not currently support non-fixed weights or bias
1454
1455 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001456 const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001457
1458 if (weightsOperand == nullptr)
1459 {
1460 return Fail("%s: Operand is invalid", __func__);
1461 }
1462 armnn::DepthwiseConvolution2dDescriptor desc;
1463 desc.m_DataLayout = armnn::DataLayout::NHWC;
1464
1465 // Look ahead to find the optional DataLayout, if present
1466 if (operation.inputs.size() >= 12)
1467 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001468 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 11, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001469 }
1470 else if (operation.inputs.size() >= 9)
1471 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001472 desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 8, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001473 }
1474
1475 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
1476 unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
1477 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
1478 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
1479
1480 // Reinterpret weight data as [ H, W, I, M ]
1481 armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
1482 weightsOperand->dimensions[2],
1483 inputInfo.GetShape()[channelsIndex],
1484 weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });
1485
1486 // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
1487 const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
1488
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001489 const ConstTensorPin weightsPin =
1490 ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
1491 1,
1492 model,
1493 data,
1494 HWIMToMIHW,
1495 &weightsShape);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001496
1497 // Bias is a 1D tensor
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001498 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001499
1500 if (!weightsPin.IsValid() || !biasPin.IsValid())
1501 {
1502 return Fail("%s: Operation has invalid inputs", __func__);
1503 }
1504
1505 armnn::ConstTensor weights = weightsPin.GetConstTensor();
1506 armnn::ConstTensor bias = biasPin.GetConstTensor();
1507 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
1508
1509 ActivationFn activation;
1510
1511 if (operation.inputs.size() >= 11)
1512 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001513 if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
1514 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
1515 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
1516 !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
1517 !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1518 !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1519 !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data) ||
1520 !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 12, desc, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01001521 {
1522 return Fail("%s: Operation has invalid inputs", __func__);
1523 }
1524 }
1525 else if (operation.inputs.size() >= 8)
1526 {
1527 android::nn::PaddingScheme paddingScheme;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001528 if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
1529 !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
1530 !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
1531 !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data) ||
1532 !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 9, desc, model, data))
Mike Kellyb5fdf382019-06-11 16:35:25 +01001533 {
1534 return Fail("%s: Operation has invalid inputs", __func__);
1535 }
1536
1537 const uint32_t kernelX = weights.GetShape()[3];
1538 const uint32_t kernelY = weights.GetShape()[2];
1539 const uint32_t inputX = inputInfo.GetShape()[widthIndex];
1540 const uint32_t inputY = inputInfo.GetShape()[heightIndex];
1541
1542 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
1543 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
1544 }
1545 else
1546 {
1547 return Fail("%s: Unsupported number of operation inputs", __func__);
1548 }
1549
1550 desc.m_BiasEnabled = true;
1551 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
1552
1553 if (!IsLayerSupportedForAnyBackend(__func__,
1554 armnn::IsDepthwiseConvolutionSupported,
1555 data.m_Backends,
1556 inputInfo,
1557 outputInfo,
1558 desc,
1559 weights.GetInfo(),
1560 biases))
1561 {
1562 return false;
1563 }
1564
1565 armnn::IConnectableLayer* startLayer =
1566 data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
1567 if (!startLayer)
1568 {
1569 return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
1570 }
1571
1572 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
1573 if (!endLayer)
1574 {
1575 return Fail("%s: ProcessActivation failed", __func__);
1576 }
1577
1578 input.Connect(startLayer->GetInputSlot(0));
1579
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001580 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001581}
1582
saoste01b8471482018-10-10 09:44:51 +01001583} // namespace armnn_driver