blob: 03f4669613c6c656136c0a0de22c5df9b81a0522 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
8#include <armnn/ArmNN.hpp>
9
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010011#include "armnn/src/armnnUtils/Permute.hpp"
12#include "Utils.hpp"
13
14#include <ActivationFunctor.h>
15#include <CpuExecutor.h>
16#include <OperationsUtils.h>
17
18#include <boost/assert.hpp>
19#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010020#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010021#include <boost/test/tools/floating_point_comparison.hpp>
22
23#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010024#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010025
26namespace armnn_driver
27{
28
29///
30/// Helper classes
31///
32
// Aggregates the state shared across the conversion of one Android NN model
// into an ArmNN network.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    // Candidate backends, queried in order when checking layer support.
    const std::vector<armnn::BackendId> m_Backends;
    // The ArmNN network being built up during conversion.
    armnn::INetworkPtr m_Network;
    // Producer slot for each operand index; entries are filled in as operands are converted.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operand data; must outlive the conversion.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
45
// Wraps an ArmNN output slot (the producer of a tensor) together with its TensorInfo,
// so the tensor can later be wired into a consuming layer's input slot.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
63
// Holds an armnn::ConstTensor built from a model operand, owning a swizzled copy of the
// data when a permutation was required (move-only).
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    // @param mappings Permutation to apply to the data; when non-identity the data is copied
    //                 into m_SwizzledTensorData.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
98
99} // namespace armnn_driver
100
101///
102/// Utility functions
103///
104
105namespace
106{
107
108using namespace armnn_driver;
109using namespace android::nn;
110
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string; args are forwarded verbatim to ALOGD.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
119
120// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
121// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
122template<typename IsLayerSupportedFunc, typename ... Args>
123bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
124{
125 std::vector<char> unsupportedReason(1024+1);
126 bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size()-1);
127 if(isSupported)
128 {
129 return true;
130 }
131 else
132 {
133 std::string sUnsupportedReason(unsupportedReason.data());
134 if (sUnsupportedReason.size() > 0)
135 {
136 ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
137 } else
138 {
139 ALOGD("%s: not supported by armnn", funcName);
140 }
141 return false;
142 }
143}
144
// Calls IsLayerSupported once per candidate backend and returns true on the first backend
// that accepts the layer; logs and returns false when none do.
// NOTE(review): std::forward is applied on every loop iteration — fine while the wrapped
// Is*Supported functions take their arguments by (const) reference, but would be unsafe if
// an rvalue argument were consumed; confirm against the Is*Supported signatures.
template<typename IsLayerSupportedFunc, typename ... Args>
bool IsLayerSupportedForAnyBackend(const char* funcName,
                                   IsLayerSupportedFunc f,
                                   const std::vector<armnn::BackendId>& backends,
                                   Args&&... args)
{
    for (auto&& backend : backends)
    {
        if (IsLayerSupported(funcName, f, backend, std::forward<Args>(args)...))
        {
            return true;
        }
    }

    ALOGD("%s: not supported by any specified backend", funcName);
    return false;
}
162
// Builds an armnn::TensorShape directly from the operand's dimensions array.
template<typename Operand>
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
168
Matthew Bentham912b3622019-05-03 15:49:14 +0100169inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100170{
Matthew Bentham912b3622019-05-03 15:49:14 +0100171 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
172 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
173 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100174}
175
#ifdef ARMNN_ANDROID_NN_V1_2

// Returns true for the HAL v1.2 operand types that can be represented as ArmNN tensors.
inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    switch (type)
    {
        case V1_2::OperandType::BOOL:
        case V1_2::OperandType::TENSOR_FLOAT16:
        case V1_2::OperandType::TENSOR_FLOAT32:
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
        case V1_2::OperandType::TENSOR_INT32:
            return true;
        default:
            return false;
    }
}

#endif
189
// The v1.0 HAL has no BOOL operand type, so this overload always reports false.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
194
#ifdef ARMNN_ANDROID_NN_V1_2

// Returns true when the given HAL v1.2 operand holds a BOOL value.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

#endif
203
// Connects input0 and input1 to the two input slots of startLayer, inserting a Reshape in
// front of the lower-rank input (padding its shape with leading 1s) when the two inputs
// have a different number of dimensions.
// @param startLayer Layer with (at least) two input slots; must not be null.
void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
                     armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);
    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
    {
        // If the number of dimensions do not match then we need to add degenerate dimensions
        // to the "smaller" tensor using a reshape:
        //      Small  Big
        //        |     |
        //     Reshape  |
        //         \   /
        //          Add
        bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();

        LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
        const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();

        LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1;
        const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();

        // Build the padded shape: leading dimensions are 1, trailing dimensions copy the small tensor.
        const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
        std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
        unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
        for (unsigned i = sizeDifference; i < bigTensorDimsNumber; ++i)
        {
            reshapedDims[i] = smallTensorDims.GetShape()[i-sizeDifference];
        }
        armnn::TensorInfo reshapedInfo = smallTensorDims;
        reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
                                                  reshapedDims.data() });

        armnn::ReshapeDescriptor reshapeDesc;
        reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
        armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
        smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

        // Connect the outputs from new reshape and original input layer
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        bigTensorHandle.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Same rank on both sides: connect directly.
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
}
255
// Computes the head/tail padding for one spatial dimension from the Android NN padding
// scheme and writes the results (cast to unsigned, range-checked by numeric_cast) into
// outPadHead/outPadTail.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
265
Matthew Bentham912b3622019-05-03 15:49:14 +0100266Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100267{
268 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100269 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100270 shape.dimensions = operand.dimensions;
271 shape.scale = operand.scale;
272 shape.offset = operand.zeroPoint;
273 return shape;
274}
275
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want to ArmNN itself to accept these inconsistencies as it is up to the user
// (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        // Accept up to 1% relative difference before leaving the bias scale untouched.
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}
294
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// Layout conversions between NHWC and ArmNN's internal ordering (names imply NCHW —
// see NHWCToArmNN/ArmNNToNHWC usage in SwizzleIn/DeswizzleOut below).
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
// Swaps dimensions 1 and 2 (self-inverse; used when remapping concat dimensions).
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
// Cyclic rotations of a 3D shape; RotateTensorLeft and RotateTensorRight are inverses.
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
305
// Adds a Permute layer with the given mapping to the network, connects 'input' to it and
// sets the permuted TensorInfo on its output slot.
// @return The newly added Permute layer.
template<typename OSlot>
armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
                                          const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);

    BOOST_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}
324
// Inserts an NHWCToArmNN permute between 'input' and the index-th input slot of 'layer'.
void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}
332
333armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
334{
335 // Add deswizzle layer
336 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
337 return deswizzleLayer;
338}
339
// Wraps a [firstLayer .. lastLayer] chain with an input swizzle and an output deswizzle.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
// @return The deswizzle layer appended after lastLayer.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
349
// Single-layer convenience overload: swizzles into and deswizzles out of the same layer.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
356
357bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
358 const armnn::TensorShape & outputShape,
359 uint32_t concatDim)
360{
361 // Validate the output shape is correct given the input shapes (which have just been validated)
362 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
363 if (outputShape.GetNumDimensions() != numDimensions)
364 {
365 return Fail("%s: Output shape has wrong number of dimensions", __func__);
366 }
367
368 unsigned int outputSizeAlongConcatenatedDimension = 0;
369 for (unsigned int i = 0; i < inputShapes.size(); i++)
370 {
371 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
372 }
373
374 for (unsigned int i = 0; i < numDimensions; ++i)
375 {
376 if (i == concatDim)
377 {
378 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
379 {
380 return Fail(
381 "%s: Invalid output shape for dimension %d (%d != %d)",
382 __func__,
383 i,
384 outputShape[i],
385 outputSizeAlongConcatenatedDimension);
386 }
387 }
388 else
389 {
390 if (outputShape[i] != inputShapes[0][i])
391 {
392 return Fail("%s: Invalid output shape", __func__);
393 }
394 }
395 }
396
397 return true;
398}
399
400bool RequiresReshape(armnn::TensorShape & inputShape)
401{
402 return inputShape.GetNumDimensions() < 3;
403}
404
405template<typename OSlot>
406armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
407 armnn::TensorInfo reshapeInfo)
408{
409 armnn::ReshapeDescriptor reshapeDescriptor;
410 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
411
412 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
413 BOOST_ASSERT(reshapeLayer != nullptr);
414
415 // Attach the input layer to the reshape layer
416 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
417 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
418
419 return *reshapeLayer;
420}
421
// Applies 'mapping' to every input (no-op for the 4D identity), replacing each entry of
// 'inputs' with a handle to the new Permute layer's output and updating 'inputShapes'
// with the permuted shapes. The two vectors are indexed in lock-step.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
442
// Decides whether concat inputs must be permuted so the concatenation can be implemented
// with Compute Library subtensors; when so, remaps concatDimension and fills permutationPair
// with the forward/inverse permutations to apply before/after the concat.
// @return true when a permutation is required (permutationPair is only set in that case).
bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;
    BOOST_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    return needPermute;
}
467
468} // anonymous namespace
469
470namespace armnn_driver
471{
472
// Creates an ArmNN activation layer and connects it to the given layer, if the
// passed in AndroidNN activation function requires so.
// @return The end layer of the sequence of layers built for the given AndroidNN
// activation function or nullptr if an error occurred (e.g. unsupported activation).
// Note that the end layer matches the input layer if no activation is required
// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
483
484} // namespace armnn_driver
485
486///
487/// Utility templates
488///
489
490namespace armnn_driver
491{
492
493using namespace android::nn;
494
// Looks up the operand referenced by the operation's inputIndex-th input.
// @return Pointer into model.operands, or nullptr when inputIndex is out of range
//         (a failure is logged unless failOnIndexOutOfBounds is false).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}
516
// Looks up the operand referenced by the operation's outputIndex-th output.
// @return Pointer into model.operands, or nullptr (with a logged failure) when
//         outputIndex is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
536
// Resolves the read-only address of an operand's data, based on its lifetime:
// CONSTANT_COPY data lives in model.operandValues, CONSTANT_REFERENCE data in one of the
// conversion's memory pools. NO_VALUE yields nullptr without a logged failure only when
// 'optional' is true; otherwise it falls through to the failure path like any other
// unsupported lifetime.
// @return Pointer to the operand data, or nullptr on failure / absent optional value.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // Deliberate: a mandatory operand without a value is treated as invalid below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
583
// Builds a ConstTensorPin from a model operand.
// @param dimensionMappings Permutation applied to the tensor data/shape (default: none).
// @param overrideTensorShape When non-null, replaces the shape derived from the operand.
// @param optional When true, an absent value yields an invalid-but-optional pin instead of
//                 a failure.
// @return A valid pin on success; an invalid pin (optionally flagged optional) otherwise.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Only constant (or explicitly absent) operands can be turned into a ConstTensor.
    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
631
// Convenience wrapper: resolves the operation's inputIndex-th input operand and converts it
// to a ConstTensorPin (see ConvertOperandToConstTensorPin for parameter semantics).
// @return An invalid pin (with a logged failure) when the input operand cannot be found.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}
658
// Reads a scalar input operand of the given HAL type into outValue.
// Fails (with a logged reason) when the operand is missing, has the wrong type, or its
// data length does not match sizeof(OutputType).
template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    outValue = *(static_cast<const OutputType*>(valueAddress));
    return true;
}
700
// Reads an INT32 scalar input operand into outValue.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}
712
// Reads a FLOAT32 scalar input operand into outValue.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}
724
// Reads an activation-function input operand (either an INT32 scalar or a TENSOR_INT32)
// and converts it to an ActivationFn.
// NOTE(review): the error message stringizes the unqualified OperandType (from the
// android::nn using-directive), not HalOperandType — confirm these print identically for
// the HAL versions in use.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}
753
// Reads an activation function supplied as an INT32 scalar operand.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
770
// Reads an activation function supplied as a tensor operand.
// NOTE(review): the comment below says this accepts a 1-D tensor of size 1, yet the call
// passes OperandType::INT32 (scalar), not TENSOR_INT32 — GetInputScalar will reject an
// operand whose type is actually TENSOR_INT32. Verify against the callers whether this
// should pass HalPolicy::OperandType::TENSOR_INT32 instead.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
788
789
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100790template<typename HalPolicy,
791 typename HalOperation = typename HalPolicy::Operation,
792 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100793bool GetOptionalInputActivation(const HalOperation& operation,
794 uint32_t inputIndex,
795 ActivationFn& activationFunction,
796 const HalModel& model,
797 const ConversionData& data)
798{
799 if (operation.inputs.size() <= inputIndex)
800 {
801 activationFunction = ActivationFn::kActivationNone;
802 }
803 else
804 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100805 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100806 {
807 return Fail("%s: Operation has invalid inputs", __func__);
808 }
809 }
810 return true;
811}
812
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100813template<typename HalPolicy,
814 typename ConvolutionDescriptor,
815 typename HalOperation = typename HalPolicy::Operation,
816 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100817bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
818 uint32_t dilationXIndex,
819 ConvolutionDescriptor& descriptor,
820 const HalModel& model,
821 const ConversionData& data)
822{
823 bool success = true;
824 if (operation.inputs.size() >= dilationXIndex + 2)
825 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100826 success &= GetInputScalar<HalPolicy>(operation,
827 dilationXIndex,
828 HalPolicy::OperandType::INT32,
829 descriptor.m_DilationX,
830 model,
831 data);
832 success &= GetInputScalar<HalPolicy>(operation,
833 dilationXIndex + 1,
834 HalPolicy::OperandType::INT32,
835 descriptor.m_DilationY,
836 model,
837 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100838 }
839
840 return success;
841}
842
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100843template<typename HalPolicy,
844 typename HalOperand = typename HalPolicy::Operand,
845 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100846bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100847 std::vector<int32_t>& outValues,
848 const HalModel& model,
849 const ConversionData& data)
850{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100851 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100852 {
853 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
854 }
855
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100856 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100857 if (!startAddress)
858 {
859 return Fail("%s: failed to get operand address", __func__, operand.type);
860 }
861
862 // Check number of bytes is sensible
863 const uint32_t numBytes = operand.location.length;
864 if (numBytes % sizeof(int32_t) != 0)
865 {
866 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
867 __func__, numBytes, sizeof(int32_t));
868 }
869
870 outValues.resize(numBytes / sizeof(int32_t));
871 memcpy(outValues.data(), startAddress, numBytes);
872 return true;
873}
874
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100875template<typename HalPolicy,
876 typename HalOperation = typename HalPolicy::Operation,
877 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100878bool GetInputPaddingScheme(const HalOperation& operation,
879 uint32_t inputIndex,
880 PaddingScheme& outPaddingScheme,
881 const HalModel& model,
882 const ConversionData& data)
883{
884 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100885 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100886 {
887 return Fail("%s: failed to get padding scheme input value", __func__);
888 }
889
890 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
891 return true;
892}
893
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Resolves the operand at 'inputIndex' of 'operation' into a LayerInputHandle:
// - TEMPORARY_VARIABLE / MODEL_INPUT / MODEL_OUTPUT operands are bound to the
//   ArmNN output slot previously recorded in data.m_OutputSlotForOperand;
// - CONSTANT_COPY / CONSTANT_REFERENCE operands are materialized as an ArmNN
//   Constant layer (after a backend-support check) and its output slot is used.
// Any failure (missing operand, unsupported tensor type, unsupported lifetime,
// invalid constant, unsupported operand type) yields an invalid handle after
// logging via Fail().
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand — caught below.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_INPUT:
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
                break;
            }
            case HalOperandLifeTime::CONSTANT_COPY:
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // Only add the Constant layer if at least one backend can run it.
                    if (!IsLayerSupportedForAnyBackend(__func__,
                                                      armnn::IsConstantSupported,
                                                      data.m_Backends,
                                                      tensorPin.GetConstTensor().GetInfo()))
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
981
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100982template<typename HalPolicy,
983 typename HalOperation = typename HalPolicy::Operation,
984 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100985bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
986 uint32_t operationOutputIndex,
987 armnn::IConnectableLayer& layer,
988 uint32_t layerOutputIndex,
989 const HalModel& model,
990 ConversionData& data)
991{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100992 using HalOperand = typename HalPolicy::Operand;
993
994 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100995 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
996 {
997 return false;
998 }
999
1000 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1001
1002 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1003 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1004
1005 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
1006
1007 return true;
1008}
1009
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001010template<typename HalPolicy,
1011 typename HalOperation = typename HalPolicy::Operation,
1012 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001013armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1014 uint32_t inputIndex,
1015 const HalModel& model,
1016 ConversionData& data)
1017{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001018 using HalOperand = typename HalPolicy::Operand;
1019
1020 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001021 if (!operand)
1022 {
1023 return armnn::DataLayout::NHWC;
1024 }
1025
1026 if (!IsBool(*operand))
1027 {
1028 return armnn::DataLayout::NHWC;
1029 }
1030
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001031 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001032 if (!valueAddress)
1033 {
1034 return armnn::DataLayout::NHWC;
1035 }
1036
1037 if (*(static_cast<const bool*>(valueAddress)))
1038 {
1039 return armnn::DataLayout::NCHW;
1040 }
1041 else
1042 {
1043 return armnn::DataLayout::NHWC;
1044 }
1045}
1046
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001047template<typename HalPolicy,
1048 typename HalOperation = typename HalPolicy::Operation,
1049 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001050bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1051 uint32_t outputIndex,
1052 armnn::IConnectableLayer& layer,
1053 const HalModel& model,
1054 ConversionData& data)
1055{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001056 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, outputIndex, layer, outputIndex, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001057}
1058
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001059template<typename HalPolicy,
1060 typename HalOperation = typename HalPolicy::Operation,
1061 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001062bool ConvertToActivation(const HalOperation& operation,
1063 const char* operationName,
1064 const armnn::ActivationDescriptor& activationDesc,
1065 const HalModel& model,
1066 ConversionData& data)
1067{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001068 using HalOperand = typename HalPolicy::Operand;
1069
1070 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001071 if (!input.IsValid())
1072 {
1073 return Fail("%s: Input 0 is invalid", operationName);
1074 }
1075
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001076 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001077 if (!outputOperand)
1078 {
1079 return false;
1080 }
1081 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001082 if (!IsLayerSupportedForAnyBackend(__func__,
1083 armnn::IsActivationSupported,
1084 data.m_Backends,
1085 input.GetTensorInfo(),
1086 outInfo,
1087 activationDesc))
arovir01b0717b52018-09-05 17:03:25 +01001088 {
1089 return false;
1090 }
1091
1092 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1093 BOOST_ASSERT(layer != nullptr);
1094 input.Connect(layer->GetInputSlot(0));
1095
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001096 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001097}
1098
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts a HAL 2D pooling operation (max/avg/L2, chosen by 'poolType') into
// an ArmNN Pooling2d layer, optionally followed by a fused activation.
// Two HAL encodings are handled:
//  - 7 inputs: implicit padding (scheme) + strides + pool size + activation;
//  - otherwise: explicit padding (l/r/t/b) + strides + pool size + activation.
// 'operationName' is used in diagnostics only.
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    // Pooling here is always treated as NHWC; no optional layout operand is read.
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // Shape indices assume NHWC: [N, H, W, C] → W at [2], H at [1].
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        // Derive explicit pad values from the implicit padding scheme.
        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        // NOTE(review): no explicit check that inputs.size() >= 10 here — the
        // scalar reads below fail individually if operands are missing.
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPooling2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the pooling layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    // Track the (possibly activation-wrapped) final layer's output slot.
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1196
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts a HAL CONV_2D operation into an ArmNN Convolution2d layer plus an
// optional fused activation. Supports both HAL encodings:
//  - >= 10 inputs: explicit padding (l/r/t/b), strides, activation, then
//    optional data layout (idx 10) and dilation (idx 11..12);
//  - >= 7 inputs: implicit padding scheme, strides, activation, optional
//    data layout (idx 7) and dilation (idx 8..9).
// Weights and bias must be constant operands (ArmNN limitation).
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input_scale * weight_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() >= 10)
    {
        // Explicit-padding variant.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
    }
    else if (operation.inputs.size() >= 7)
    {
        // Implicit-padding variant: padding derived from the scheme below.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are OHWI per NNAPI; input indexed as NHWC.
        // NOTE(review): these indices are computed before the optional data
        // layout is read two lines below — they assume NHWC/OHWI; confirm for
        // NCHW models.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConvolution2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1313
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
// Converts a HAL DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer plus an optional fused activation. Supports:
//  - >= 11 inputs: explicit padding, strides, depth multiplier (idx 9, not
//    read here), activation (idx 10), optional layout (idx 11), dilation (12..13);
//  - >= 8 inputs: implicit padding scheme, strides, activation (idx 7),
//    optional layout (idx 8), dilation (9..10).
// HAL weights arrive as [1, H, W, I * M] and are reinterpreted/permuted to
// ArmNN's expected [M, I, H, W] ordering.
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Look ahead to find the optional DataLayout, if present, because the
    // channel/width/height indices below depend on it.
    if (operation.inputs.size() >= 12)
    {
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 11, model, data);
    }
    else if (operation.inputs.size() >= 9)
    {
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 8, model, data);
    }

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M = depth multiplier, recovered as (I*M) / I from the HAL shape.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input_scale * weight_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() >= 11)
    {
        // Explicit-padding variant.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() >= 8)
    {
        // Implicit-padding variant: padding derived from the scheme below.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are now [M, I, H, W] after the swizzle above: W at [3], H at [2].
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1468
saoste01b8471482018-10-10 09:44:51 +01001469} // namespace armnn_driver