blob: 9a711cb72339cd1a939710ecf4f47d02d943f13c [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
8#include <armnn/ArmNN.hpp>
9
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010011#include "armnn/src/armnnUtils/Permute.hpp"
12#include "Utils.hpp"
13
14#include <ActivationFunctor.h>
15#include <CpuExecutor.h>
16#include <OperationsUtils.h>
17
18#include <boost/assert.hpp>
19#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010020#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010021#include <boost/test/tools/floating_point_comparison.hpp>
22
23#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010024#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010025
26namespace armnn_driver
27{
28
29///
30/// Helper classes
31///
32
/// State shared by all operation converters while an AndroidNN model is being
/// translated into an armnn::INetwork.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    // Backends the layers may be checked against (see IsLayerSupportedForAnyBackend).
    const std::vector<armnn::BackendId> m_Backends;
    // The network under construction; starts out as a null handle (nullptr, nullptr).
    armnn::INetworkPtr m_Network;
    // Presumably indexed by model operand index, mapping operands to the output
    // slots that produce them — TODO confirm against the converter implementations.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operands
    // (read via GetMemoryFromPool in GetOperandValueReadOnlyAddress).
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
45
/// Handle pairing an (optional) armnn output slot with the TensorInfo of the
/// data it carries; used to wire converted operands into new layers.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    /// True when this handle wraps a usable output slot.
    bool IsValid() const;

    /// Connects this handle's source to the given input slot
    /// (declaration only — see the definition for exact semantics).
    void Connect(armnn::IInputSlot& inputSlot);

    /// TensorInfo describing the data available through this handle.
    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
63
/// Holds an armnn::ConstTensor built from a model operand, keeping alive any
/// swizzled copy of the data the tensor may reference.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    // @param mappings Dimension permutation to apply to the data (identity means no swizzling
    //                 — see m_SwizzledTensorData below).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Non-copyable (the pin may own the swizzled data buffer); movable.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
98
99} // namespace armnn_driver
100
101///
102/// Utility functions
103///
104
105namespace
106{
107
108using namespace armnn_driver;
109using namespace android::nn;
110
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string passed straight to ALOGD.
// @param args      Arguments matching the format string's conversion specifiers.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
// Note: the redundant 'static' was removed — this function already has internal
// linkage because it lives in an anonymous namespace.
template<class... Args>
bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
119
120// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
121// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
122template<typename IsLayerSupportedFunc, typename ... Args>
123bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
124{
125 std::vector<char> unsupportedReason(1024+1);
126 bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size()-1);
127 if(isSupported)
128 {
129 return true;
130 }
131 else
132 {
133 std::string sUnsupportedReason(unsupportedReason.data());
134 if (sUnsupportedReason.size() > 0)
135 {
136 ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
137 } else
138 {
139 ALOGD("%s: not supported by armnn", funcName);
140 }
141 return false;
142 }
143}
144
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100145template<typename IsLayerSupportedFunc, typename ... Args>
146bool IsLayerSupportedForAnyBackend(const char* funcName,
147 IsLayerSupportedFunc f,
148 const std::vector<armnn::BackendId>& backends,
149 Args&&... args)
150{
151 for (auto&& backend : backends)
152 {
153 if (IsLayerSupported(funcName, f, backend, std::forward<Args>(args)...))
154 {
155 return true;
156 }
157 }
158
159 ALOGD("%s: not supported by any specified backend", funcName);
160 return false;
161}
162
Mike Kellyb5fdf382019-06-11 16:35:25 +0100163template<typename Operand>
164armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100165{
166 return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
167}
168
Matthew Bentham912b3622019-05-03 15:49:14 +0100169inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100170{
Matthew Bentham912b3622019-05-03 15:49:14 +0100171 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
172 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
173 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100174}
175
#ifdef ARMNN_ANDROID_NN_V1_2

// Returns true when the given V1_2 operand type is one of the types this
// driver can convert (note BOOL is accepted here alongside the tensor types).
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
189
190inline bool IsBool(V1_0::Operand)
191{
192 return false;
193}
194
#ifdef ARMNN_ANDROID_NN_V1_2

// Returns true when the given V1_2 operand carries a BOOL value.
inline bool IsBool(V1_2::Operand operand)
{
    const bool isBoolOperand = (operand.type == V1_2::OperandType::BOOL);
    return isBoolOperand;
}

#endif
203
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100204template<typename LayerHandleType>
205armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, LayerHandleType& inputLayer,
206 armnn::TensorInfo reshapeInfo)
207{
208 armnn::ReshapeDescriptor reshapeDescriptor;
209 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
210
211 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
212 BOOST_ASSERT(reshapeLayer != nullptr);
213
214 // Attach the input layer to the reshape layer
215 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
216 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
217
218 return *reshapeLayer;
219}
220
// Connects input0 and input1 to the first two input slots of startLayer.
// When the ranks differ, a reshape layer is inserted on the lower-rank input,
// prepending size-1 dimensions so the two inputs can be broadcast together.
void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1,
                     armnn::IConnectableLayer* startLayer, armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();

    if (inputDimensions0 == inputDimensions1)
    {
        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));

        return;
    }

    // Since the number of dimensions do not match then we need to add degenerate dimensions
    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.

    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
    unsigned int sizeDifference = std::abs(boost::numeric_cast<int>(inputDimensions0) -
                                           boost::numeric_cast<int>(inputDimensions1));

    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();

    // Copy the smaller tensor's dimensions into the tail of a rank-maxInputDimensions
    // shape whose leading entries are 1 (degenerate dimensions).
    const armnn::TensorShape& smallShape = smallInfo.GetShape();
    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
    {
        reshapedDimensions[i] = smallShape[i - sizeDifference];
    }

    armnn::TensorInfo reshapedInfo = smallInfo;
    reshapedInfo.SetShape(armnn::TensorShape{ boost::numeric_cast<unsigned int>(reshapedDimensions.size()),
                                              reshapedDimensions.data() });
    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(network, smallInputHandle, reshapedInfo);

    if (input0IsSmaller)
    {
        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //  Reshape  |
        //      \   /
        //    StartLayer

        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
        //
        //  Input0 Input1
        //     |     |
        //     |  Reshape
        //      \   /
        //    StartLayer

        input0.Connect(startLayer->GetInputSlot(0));
        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
    }
}
291
292void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
293 android::nn::PaddingScheme scheme)
294{
295 int32_t padHead;
296 int32_t padTail;
297 calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
298 outPadHead = boost::numeric_cast<uint32_t>(padHead);
299 outPadTail = boost::numeric_cast<uint32_t>(padTail);
300}
301
Matthew Bentham912b3622019-05-03 15:49:14 +0100302Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100303{
304 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100305 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100306 shape.dimensions = operand.dimensions;
307 shape.scale = operand.scale;
308 shape.offset = operand.zeroPoint;
309 return shape;
310}
311
312// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
313// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
314// we accept some tolerance. We don't want to ArmNN itself to accept these inconsistencies as it is up to the user
315// (us, in this case) to ensure they match.
316void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
317 const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
318{
319 const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
320 if (biasInfo.GetQuantizationScale() != expectedBiasScale)
321 {
322 boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
323 if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
324 {
325 ALOGW("Bias quantization scale has been modified to match input*weights");
326 biasInfo.SetQuantizationScale(expectedBiasScale);
327 }
328 }
329}
330
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
// Swaps dimensions 1 and 2 (self-inverse: applying it twice is the identity).
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
// RotateTensorLeft and RotateTensorRight are mutually inverse rotations
// (used as a pair by CreateConcatPermutationParameters).
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
341
342template<typename OSlot>
343armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
344 const armnn::PermutationVector& mappings)
345{
346 // Add swizzle layer
347 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
348
349 BOOST_ASSERT(layer != nullptr);
350
351 // Connect input to swizzle layer
352 input.Connect(layer->GetInputSlot(0));
353
354 // Setup swizzled output
355 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
356 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
357
358 return *layer;
359}
360
361void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
362{
363 // Add swizzle layer
364 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
365 // Connect swizzled input to layer
366 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
367}
368
369armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
370{
371 // Add deswizzle layer
372 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
373 return deswizzleLayer;
374}
375
376// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
377armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
378 LayerInputHandle& input,
379 armnn::IConnectableLayer& firstLayer,
380 armnn::IConnectableLayer& lastLayer)
381{
382 SwizzleIn(network, input, firstLayer, 0);
383 return DeswizzleOut(network, lastLayer, 0);
384}
385
386// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
387armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
388 armnn::IConnectableLayer& layer)
389{
390 return SwizzleInDeswizzleOut(network, input, layer, layer);
391}
392
393bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
394 const armnn::TensorShape & outputShape,
395 uint32_t concatDim)
396{
397 // Validate the output shape is correct given the input shapes (which have just been validated)
398 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
399 if (outputShape.GetNumDimensions() != numDimensions)
400 {
401 return Fail("%s: Output shape has wrong number of dimensions", __func__);
402 }
403
404 unsigned int outputSizeAlongConcatenatedDimension = 0;
405 for (unsigned int i = 0; i < inputShapes.size(); i++)
406 {
407 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
408 }
409
410 for (unsigned int i = 0; i < numDimensions; ++i)
411 {
412 if (i == concatDim)
413 {
414 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
415 {
416 return Fail(
417 "%s: Invalid output shape for dimension %d (%d != %d)",
418 __func__,
419 i,
420 outputShape[i],
421 outputSizeAlongConcatenatedDimension);
422 }
423 }
424 else
425 {
426 if (outputShape[i] != inputShapes[0][i])
427 {
428 return Fail("%s: Invalid output shape", __func__);
429 }
430 }
431 }
432
433 return true;
434}
435
436bool RequiresReshape(armnn::TensorShape & inputShape)
437{
438 return inputShape.GetNumDimensions() < 3;
439}
440
arovir01b0717b52018-09-05 17:03:25 +0100441void SwizzleInputs(armnn::INetwork& network,
442 std::vector<LayerInputHandle>& inputs,
443 std::vector<armnn::TensorShape>& inputShapes,
444 const armnn::PermutationVector& mapping)
445{
446 if (!mapping.IsEqual(IdentityPermutation4D))
447 {
448 size_t nInputs = inputs.size();
449 for (size_t i=0; i<nInputs; ++i)
450 {
451 // add swizzle layer
452 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
453 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
454 auto& outputInfo = outputSlot.GetTensorInfo();
455 // replace inputs with the swizzled ones
456 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
457 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
458 }
459 }
460}
461
narpra01f176d5a2018-11-18 20:17:48 +0000462bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
463 int32_t & concatDimension,
464 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100465{
narpra01f176d5a2018-11-18 20:17:48 +0000466 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100467 BOOST_ASSERT(numberOfDimensions >= 3);
468
469 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000470 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
471 // or along dimension 0 or 2 for a 3-D tensor.
472 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100473 {
narpra01f176d5a2018-11-18 20:17:48 +0000474 concatDimension = 1;
475 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
476 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100477 }
narpra01f176d5a2018-11-18 20:17:48 +0000478 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100479 {
narpra01f176d5a2018-11-18 20:17:48 +0000480 concatDimension = 0;
481 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
482 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100483 }
narpra01f176d5a2018-11-18 20:17:48 +0000484 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100485}
486
487} // anonymous namespace
488
489namespace armnn_driver
490{
491
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @param tensorInfo TensorInfo associated with the activation
///                   (presumably set on the activation layer's output — see the definition).
/// @param activation The AndroidNN fused activation function to realise.
/// @param prevLayer  Layer whose output feeds the activation sequence.
/// @param data       Conversion state (network under construction, backends, pools).
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
503} // namespace armnn_driver
504
505///
506/// Utility templates
507///
508
509namespace armnn_driver
510{
511
512using namespace android::nn;
513
// Looks up the operand feeding input 'inputIndex' of 'operation'.
// @param failOnIndexOutOfBounds When true, an out-of-range index is logged as a failure.
// @return Pointer into model.operands, or nullptr when the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu match the unsigned/size_t arguments; the previous "%i" for a
            // size_t value was a varargs type mismatch (undefined behaviour).
            Fail("%s: invalid input index: %u out of %zu",
                 __func__, inputIndex, static_cast<size_t>(operation.inputs.size()));
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size());
    return &model.operands[operation.inputs[inputIndex]];
}
535
// Looks up the operand produced as output 'outputIndex' of 'operation'.
// @return Pointer into model.operands, or nullptr (with a logged failure) when out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %u/%zu match the unsigned/size_t arguments; the previous "%i" for a
        // size_t value was a varargs type mismatch (undefined behaviour).
        Fail("%s: invalid output index: %u out of %zu",
             __func__, outputIndex, static_cast<size_t>(operation.outputs.size()));
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
555
// Resolves a read-only pointer to the value of 'operand': either inside the
// model's embedded operandValues blob (CONSTANT_COPY) or inside one of the
// conversion memory pools (CONSTANT_REFERENCE).
// @param optional When true, a NO_VALUE operand yields nullptr without logging a failure.
// @return Pointer to the operand data, or nullptr when unavailable/unsupported.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A mandatory operand with no value is treated as unsupported below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
602
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100603template<typename HalPolicy,
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100604 typename HalOperation = typename HalPolicy::Operation,
605 typename HalModel = typename HalPolicy::Model,
606 typename HalOperandType = typename HalPolicy::OperandType>
607bool GetOperandType(const HalOperation& operation,
608 uint32_t inputIndex,
609 const HalModel& model,
610 HalOperandType& type)
611{
612 using HalOperand = typename HalPolicy::Operand;
613
614 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
615 if (!operand)
616 {
617 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
618 }
619
620 type = operand->type;
621 return true;
622}
623
// Converts a constant model operand into a ConstTensorPin.
// @param dimensionMappings   Permutation to apply to the tensor data (default: none).
// @param overrideTensorShape Optional replacement for the operand's own shape.
// @param optional            When true, an operand with no value produces an
//                            invalid-but-optional pin instead of a failure.
// @return The pin; invalid (IsValid() == false) on any error.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Non-optional operands must be constants (copied or referenced); NO_VALUE is
    // tolerated here and resolved below once the value address has been fetched.
    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
671
672template<typename HalPolicy,
673 typename HalOperation = typename HalPolicy::Operation,
674 typename HalModel = typename HalPolicy::Model>
675ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
676 uint32_t inputIndex,
677 const HalModel& model,
678 const ConversionData& data,
679 const armnn::PermutationVector& dimensionMappings = g_DontPermute,
680 const armnn::TensorShape* overrideTensorShape = nullptr,
681 bool optional = false)
682{
683 using HalOperand = typename HalPolicy::Operand;
684
685 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
686 if (!operand)
687 {
688 Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
689 return ConstTensorPin();
690 }
691 return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
692 model,
693 data,
694 dimensionMappings,
695 overrideTensorShape,
696 optional);
697}
698
699template<typename HalPolicy,
700 typename OutputType,
701 typename HalOperandType = typename HalPolicy::OperandType,
702 typename HalOperation = typename HalPolicy::Operation,
703 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100704bool GetInputScalar(const HalOperation& operation,
705 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100706 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100707 OutputType& outValue,
708 const HalModel& model,
709 const ConversionData& data)
710{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100711 using HalOperand = typename HalPolicy::Operand;
712
713 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100714 if (!operand)
715 {
716 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
717 }
718
719 if (operand->type != type)
720 {
721 return Fail("%s: unexpected operand type: %s (should be %s)",
722 __func__, toString(operand->type).c_str(), toString(type).c_str());
723 }
724
725 if (operand->location.length != sizeof(OutputType))
726 {
727 return Fail("%s: incorrect operand location length: %i (should be %i)",
728 __func__, operand->location.length, sizeof(OutputType));
729 }
730
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100731 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100732 if (!valueAddress)
733 {
734 return Fail("%s: failed to get address for operand", __func__);
735 }
736
737 outValue = *(static_cast<const OutputType*>(valueAddress));
738 return true;
739}
740
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100741template<typename HalPolicy,
742 typename HalOperation = typename HalPolicy::Operation,
743 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100744bool GetInputInt32(const HalOperation& operation,
745 uint32_t inputIndex,
746 int32_t& outValue,
747 const HalModel& model,
748 const ConversionData& data)
749{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100750 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100751}
752
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100753template<typename HalPolicy,
754 typename HalOperation = typename HalPolicy::Operation,
755 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100756bool GetInputFloat32(const HalOperation& operation,
757 uint32_t inputIndex,
758 float& outValue,
759 const HalModel& model,
760 const ConversionData& data)
761{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100762 return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100763}
764
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100765template<typename HalPolicy,
766 typename HalOperation = typename HalPolicy::Operation,
767 typename HalOperandType = typename HalPolicy::OperandType,
768 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100769bool GetInputActivationFunctionImpl(const HalOperation& operation,
770 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100771 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100772 ActivationFn& outActivationFunction,
773 const HalModel& model,
774 const ConversionData& data)
775{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100776 if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100777 {
778 return Fail("%s: unexpected operand type: %s (should be %s or %s)",
779 __func__,
780 toString(type).c_str(),
781 toString(OperandType::INT32).c_str(),
782 toString(OperandType::TENSOR_INT32).c_str());
783 }
784
785 int32_t activationFunctionAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100786 if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100787 {
788 return Fail("%s: failed to get activation input value", __func__);
789 }
790 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
791 return true;
792}
793
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100794template<typename HalPolicy,
795 typename HalOperation = typename HalPolicy::Operation,
796 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100797bool GetInputActivationFunction(const HalOperation& operation,
798 uint32_t inputIndex,
799 ActivationFn& outActivationFunction,
800 const HalModel& model,
801 const ConversionData& data)
802{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100803 return GetInputActivationFunctionImpl<HalPolicy>(operation,
804 inputIndex,
805 HalPolicy::OperandType::INT32,
806 outActivationFunction,
807 model,
808 data);
arovir01b0717b52018-09-05 17:03:25 +0100809}
810
// Reads a fused activation function for callers whose activation operand is
// described as a 1-D tensor of size 1.
// NOTE(review): despite the name and the comment below, this forwards
// HalPolicy::OperandType::INT32 (not TENSOR_INT32) to the scalar reader, so it
// only accepts scalar INT32 operands — confirm against the callers (e.g. LSTM
// activation inputs) whether that is intended before changing it.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
828
829
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100830template<typename HalPolicy,
831 typename HalOperation = typename HalPolicy::Operation,
832 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100833bool GetOptionalInputActivation(const HalOperation& operation,
834 uint32_t inputIndex,
835 ActivationFn& activationFunction,
836 const HalModel& model,
837 const ConversionData& data)
838{
839 if (operation.inputs.size() <= inputIndex)
840 {
841 activationFunction = ActivationFn::kActivationNone;
842 }
843 else
844 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100845 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100846 {
847 return Fail("%s: Operation has invalid inputs", __func__);
848 }
849 }
850 return true;
851}
852
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100853template<typename HalPolicy,
854 typename ConvolutionDescriptor,
855 typename HalOperation = typename HalPolicy::Operation,
856 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100857bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
858 uint32_t dilationXIndex,
859 ConvolutionDescriptor& descriptor,
860 const HalModel& model,
861 const ConversionData& data)
862{
863 bool success = true;
864 if (operation.inputs.size() >= dilationXIndex + 2)
865 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100866 success &= GetInputScalar<HalPolicy>(operation,
867 dilationXIndex,
868 HalPolicy::OperandType::INT32,
869 descriptor.m_DilationX,
870 model,
871 data);
872 success &= GetInputScalar<HalPolicy>(operation,
873 dilationXIndex + 1,
874 HalPolicy::OperandType::INT32,
875 descriptor.m_DilationY,
876 model,
877 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100878 }
879
880 return success;
881}
882
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100883template<typename HalPolicy,
884 typename HalOperand = typename HalPolicy::Operand,
885 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100886bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100887 std::vector<int32_t>& outValues,
888 const HalModel& model,
889 const ConversionData& data)
890{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100891 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100892 {
893 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
894 }
895
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100896 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100897 if (!startAddress)
898 {
899 return Fail("%s: failed to get operand address", __func__, operand.type);
900 }
901
902 // Check number of bytes is sensible
903 const uint32_t numBytes = operand.location.length;
904 if (numBytes % sizeof(int32_t) != 0)
905 {
906 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
907 __func__, numBytes, sizeof(int32_t));
908 }
909
910 outValues.resize(numBytes / sizeof(int32_t));
911 memcpy(outValues.data(), startAddress, numBytes);
912 return true;
913}
914
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100915template<typename HalPolicy,
916 typename HalOperation = typename HalPolicy::Operation,
917 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100918bool GetInputPaddingScheme(const HalOperation& operation,
919 uint32_t inputIndex,
920 PaddingScheme& outPaddingScheme,
921 const HalModel& model,
922 const ConversionData& data)
923{
924 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100925 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100926 {
927 return Fail("%s: failed to get padding scheme input value", __func__);
928 }
929
930 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
931 return true;
932}
933
/// Resolves input operand @p inputIndex of @p operation into a LayerInputHandle
/// that wraps the ArmNN output slot producing that tensor.
/// - TEMPORARY_VARIABLE / MODEL_INPUT / MODEL_OUTPUT operands are looked up in
///   data.m_OutputSlotForOperand (slot produced by a previously converted layer).
/// - CONSTANT_COPY / CONSTANT_REFERENCE operands are materialised as a new
///   ArmNN Constant layer added to data.m_Network.
/// Returns an invalid (default) LayerInputHandle on any failure; errors are
/// reported through Fail().
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; handled below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_INPUT:
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
                break;
            }
            case HalOperandLifeTime::CONSTANT_COPY:
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // The constant must be supported by at least one backend before a layer is added.
                    if (!IsLayerSupportedForAnyBackend(__func__,
                                                      armnn::IsConstantSupported,
                                                      data.m_Backends,
                                                      tensorPin.GetConstTensor().GetInfo()))
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1021
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001022template<typename HalPolicy,
1023 typename HalOperation = typename HalPolicy::Operation,
1024 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001025bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1026 uint32_t operationOutputIndex,
1027 armnn::IConnectableLayer& layer,
1028 uint32_t layerOutputIndex,
1029 const HalModel& model,
1030 ConversionData& data)
1031{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001032 using HalOperand = typename HalPolicy::Operand;
1033
1034 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001035 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1036 {
1037 return false;
1038 }
1039
1040 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1041
1042 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1043 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1044
1045 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
1046
1047 return true;
1048}
1049
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001050template<typename HalPolicy,
1051 typename HalOperation = typename HalPolicy::Operation,
1052 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001053armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1054 uint32_t inputIndex,
1055 const HalModel& model,
1056 ConversionData& data)
1057{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001058 using HalOperand = typename HalPolicy::Operand;
1059
1060 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001061 if (!operand)
1062 {
1063 return armnn::DataLayout::NHWC;
1064 }
1065
1066 if (!IsBool(*operand))
1067 {
1068 return armnn::DataLayout::NHWC;
1069 }
1070
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001071 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001072 if (!valueAddress)
1073 {
1074 return armnn::DataLayout::NHWC;
1075 }
1076
1077 if (*(static_cast<const bool*>(valueAddress)))
1078 {
1079 return armnn::DataLayout::NCHW;
1080 }
1081 else
1082 {
1083 return armnn::DataLayout::NHWC;
1084 }
1085}
1086
/// Convenience overload for the common case where the HAL operation output
/// index and the ArmNN layer output-slot index are the same value.
/// Forwards to the five-argument SetupAndTrackLayerOutputSlot overload.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data)
{
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, outputIndex, layer, outputIndex, model, data);
}
1098
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001099template<typename HalPolicy,
1100 typename HalOperation = typename HalPolicy::Operation,
1101 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001102bool ConvertToActivation(const HalOperation& operation,
1103 const char* operationName,
1104 const armnn::ActivationDescriptor& activationDesc,
1105 const HalModel& model,
1106 ConversionData& data)
1107{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001108 using HalOperand = typename HalPolicy::Operand;
1109
1110 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001111 if (!input.IsValid())
1112 {
1113 return Fail("%s: Input 0 is invalid", operationName);
1114 }
1115
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001116 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001117 if (!outputOperand)
1118 {
1119 return false;
1120 }
1121 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001122 if (!IsLayerSupportedForAnyBackend(__func__,
1123 armnn::IsActivationSupported,
1124 data.m_Backends,
1125 input.GetTensorInfo(),
1126 outInfo,
1127 activationDesc))
arovir01b0717b52018-09-05 17:03:25 +01001128 {
1129 return false;
1130 }
1131
1132 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1133 BOOST_ASSERT(layer != nullptr);
1134 input.Connect(layer->GetInputSlot(0));
1135
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001136 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001137}
1138
/// Converts a HAL 2D pooling operation (max/average/L2, selected by
/// @p poolType) into an ArmNN Pooling2d layer followed by the requested fused
/// activation. Supports both the implicit-padding form (7 inputs, padding
/// scheme) and the explicit-padding form (10 inputs, l/r/t/b values).
/// @return true on success, false (via Fail) on unreadable inputs or when no
///         backend supports the layer.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // The data layout is fixed to NHWC for pooling; the shape indices used
    // below for implicit padding rely on this.
    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // NHWC: index 2 is width, index 1 is height.
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        // Derive explicit per-edge padding from the implicit padding scheme.
        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPooling2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (may return pooling2dLayer itself when the
    // activation is "none" — presumably; verify in ProcessActivation).
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1236
/// Converts a HAL CONV_2D operation into an ArmNN Convolution2d layer plus the
/// requested fused activation. Handles both the explicit-padding form
/// (>= 10 inputs) and the implicit-padding form (>= 7 inputs), each with
/// optional data-layout and dilation operands.
/// @return true on success, false (via Fail) on invalid inputs or when no
///         backend supports the layer.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() >= 10)
    {
        // Explicit padding: pad l/r/t/b, strides, activation, optional layout + dilation.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
    }
    else if (operation.inputs.size() >= 7)
    {
        // Implicit padding: padding scheme, strides, activation, optional layout + dilation.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // NOTE(review): these indices assume NHWC input and OHWI weights, but
        // the optional data layout is only read after the padding is computed
        // below — confirm behaviour for NCHW models.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConvolution2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1353
/// Converts a HAL DEPTHWISE_CONV_2D operation into an ArmNN
/// DepthwiseConvolution2d layer plus the requested fused activation.
/// The Android NN weights tensor [1, H, W, I * M] is reshaped to [H, W, I, M]
/// and permuted to ArmNN's expected [M, I, H, W] ordering. Handles both the
/// explicit-padding form (>= 11 inputs) and the implicit-padding form
/// (>= 8 inputs), each with optional data-layout and dilation operands.
/// @return true on success, false (via Fail) on invalid inputs or when no
///         backend supports the layer.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Look ahead to find the optional DataLayout, if present
    if (operation.inputs.size() >= 12)
    {
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 11, model, data);
    }
    else if (operation.inputs.size() >= 9)
    {
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 8, model, data);
    }

    // Resolve which input-shape dimension is channels/width/height for the chosen layout.
    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M, the depth multiplier, is recovered as (I * M) / I from the input's channel count.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() >= 11)
    {
        // Explicit padding: pad l/r/t/b, strides, activation, optional dilation.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() >= 8)
    {
        // Implicit padding: padding scheme, strides, activation, optional dilation.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are now [ M, I, H, W ], so index 3 is W and index 2 is H.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation after the depthwise convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1508
saoste01b8471482018-10-10 09:44:51 +01001509} // namespace armnn_driver