blob: 36bc4ae189f8bccbfcd179bb2b4093ec13eb53eb [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
8#include <armnn/ArmNN.hpp>
9
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010011#include "armnn/src/armnnUtils/Permute.hpp"
12#include "Utils.hpp"
13
14#include <ActivationFunctor.h>
15#include <CpuExecutor.h>
16#include <OperationsUtils.h>
17
18#include <boost/assert.hpp>
19#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010020#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010021#include <boost/test/tools/floating_point_comparison.hpp>
22
23#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010024#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010025
26namespace armnn_driver
27{
28
29///
30/// Helper classes
31///
32
// Shared state threaded through the conversion of one model: the candidate
// backends, the ArmNN network being built, and per-conversion bookkeeping.
struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
        : m_Backends(backends)
        , m_Network(nullptr, nullptr)
    {}

    // Backends the converted layers may be checked against / assigned to.
    const std::vector<armnn::BackendId> m_Backends;
    // The ArmNN network under construction (starts out null).
    armnn::INetworkPtr m_Network;
    // Output slot producing each operand; presumably indexed by operand index — verify at call sites.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operand data.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
45
// Wraps an ArmNN output slot (plus the tensor info it carries) destined to feed
// the input slot of a later layer. A default-constructed handle is invalid and
// can be used to signal errors.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True if this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
63
// Holds an armnn::ConstTensor built from a model operand, owning any swizzled
// copy of the data so the tensor's backing memory stays alive. Move-only.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation applied to the tensor data (see m_SwizzledTensorData).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Copying is disallowed because the pin may own the swizzled data buffer.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
98
99} // namespace armnn_driver
100
101///
102/// Utility functions
103///
104
105namespace
106{
107
108using namespace armnn_driver;
109using namespace android::nn;
110
111// Convenience function to log the reason for failing to convert a model.
112// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    // Log at debug level via Android logging; 'formatStr' follows printf conventions.
    ALOGD(formatStr, std::forward<Args>(args)...);
    // Always false, so callers can write: return Fail(...);
    return false;
}
119
120// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
121// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
122template<typename IsLayerSupportedFunc, typename ... Args>
123bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
124{
125 std::vector<char> unsupportedReason(1024+1);
126 bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size()-1);
127 if(isSupported)
128 {
129 return true;
130 }
131 else
132 {
133 std::string sUnsupportedReason(unsupportedReason.data());
134 if (sUnsupportedReason.size() > 0)
135 {
136 ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
137 } else
138 {
139 ALOGD("%s: not supported by armnn", funcName);
140 }
141 return false;
142 }
143}
144
// Tries IsLayerSupported() against each backend in turn; returns true on the first
// backend that reports support, false (with a log entry) if none do.
// NOTE(review): 'args' are forwarded on every loop iteration; if any argument were a
// move-only rvalue, later backends could see moved-from values — confirm callers pass lvalues.
template<typename IsLayerSupportedFunc, typename ... Args>
bool IsLayerSupportedForAnyBackend(const char* funcName,
                                   IsLayerSupportedFunc f,
                                   const std::vector<armnn::BackendId>& backends,
                                   Args&&... args)
{
    for (auto&& backend : backends)
    {
        if (IsLayerSupported(funcName, f, backend, std::forward<Args>(args)...))
        {
            return true;
        }
    }

    ALOGD("%s: not supported by any specified backend", funcName);
    return false;
}
162
// Builds an armnn::TensorShape directly from the operand's 'dimensions' array.
template<typename Operand>
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
168
Matthew Bentham912b3622019-05-03 15:49:14 +0100169inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100170{
Matthew Bentham912b3622019-05-03 15:49:14 +0100171 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
172 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
173 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100174}
175
Mike Kellyb5fdf382019-06-11 16:35:25 +0100176#ifdef ARMNN_ANDROID_NN_V1_2
177
178inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
179{
180 return type == V1_2::OperandType::BOOL ||
181 type == V1_2::OperandType::TENSOR_FLOAT16 ||
182 type == V1_2::OperandType::TENSOR_FLOAT32 ||
183 type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
184 type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
185 type == V1_2::OperandType::TENSOR_INT32;
186}
187
188#endif
189
// A V1_0 operand is never treated as BOOL (no BOOL type in the 1.0 operand set).
inline bool IsBool(V1_0::Operand)
{
    return false;
}
194
195#ifdef ARMNN_ANDROID_NN_V1_2
196
// V1_2 overload: true iff the operand is of the 1.2 BOOL type.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}
201
202#endif
203
// Connects input0 and input1 to the two input slots of 'startLayer'. If the
// ranks differ, a reshape is inserted in front of the lower-rank input to
// left-pad its shape with degenerate (size-1) dimensions, enabling broadcasting.
// NOTE(review): in the rank-mismatch branch the (reshaped) smaller tensor always
// feeds slot 0 and the bigger tensor slot 1, regardless of which argument was
// which — fine for commutative ops; confirm callers never rely on operand order here.
void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
                     armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);
    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
    {
        // If the number of dimensions do not match then we need to add degenerate dimensions
        // to the "smaller" tensor using a reshape:
        //   Small  Big
        //     |     |
        //  Reshape  |
        //      \   /
        //       Add
        bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();

        LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
        const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();

        LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1;
        const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();

        // Left-pad the smaller shape with 1s, e.g. [H,W] -> [1,1,H,W] against a 4-D big tensor.
        const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
        std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
        unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
        for (unsigned i = sizeDifference; i < bigTensorDimsNumber; ++i)
        {
            reshapedDims[i] = smallTensorDims.GetShape()[i-sizeDifference];
        }
        armnn::TensorInfo reshapedInfo = smallTensorDims;
        reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
                                                  reshapedDims.data() });

        armnn::ReshapeDescriptor reshapeDesc;
        reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
        armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
        smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

        // Connect the outputs from new reshape and original input layer
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        bigTensorHandle.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
}
255
// Computes explicit head/tail padding for one spatial dimension from an Android NN
// implicit padding scheme, delegating to the NN runtime's calculateExplicitPadding().
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    // numeric_cast throws if the computed padding is negative rather than silently wrapping.
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
265
Matthew Bentham912b3622019-05-03 15:49:14 +0100266Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100267{
268 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100269 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100270 shape.dimensions = operand.dimensions;
271 shape.scale = operand.scale;
272 shape.offset = operand.zeroPoint;
273 return shape;
274}
275
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the user
// (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        // Tolerate up to 1% relative difference and snap the bias scale to the expected value (with a warning).
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}
294
// 4D Tensor Permutations
// NOTE(review): the exact source/destination convention of armnn::PermutationVector
// should be confirmed against armnnUtils::Permuted before adding new mappings.
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
305
306template<typename OSlot>
307armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
308 const armnn::PermutationVector& mappings)
309{
310 // Add swizzle layer
311 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
312
313 BOOST_ASSERT(layer != nullptr);
314
315 // Connect input to swizzle layer
316 input.Connect(layer->GetInputSlot(0));
317
318 // Setup swizzled output
319 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
320 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
321
322 return *layer;
323}
324
325void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
326{
327 // Add swizzle layer
328 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
329 // Connect swizzled input to layer
330 swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
331}
332
333armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
334{
335 // Add deswizzle layer
336 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
337 return deswizzleLayer;
338}
339
// Swizzles 'input' into slot 0 of 'firstLayer' and returns the deswizzle layer appended
// to output slot 0 of 'lastLayer'.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
349
// Single-layer convenience overload: the layer is both the first and last of the sequence.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
356
357bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
358 const armnn::TensorShape & outputShape,
359 uint32_t concatDim)
360{
361 // Validate the output shape is correct given the input shapes (which have just been validated)
362 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
363 if (outputShape.GetNumDimensions() != numDimensions)
364 {
365 return Fail("%s: Output shape has wrong number of dimensions", __func__);
366 }
367
368 unsigned int outputSizeAlongConcatenatedDimension = 0;
369 for (unsigned int i = 0; i < inputShapes.size(); i++)
370 {
371 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
372 }
373
374 for (unsigned int i = 0; i < numDimensions; ++i)
375 {
376 if (i == concatDim)
377 {
378 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
379 {
380 return Fail(
381 "%s: Invalid output shape for dimension %d (%d != %d)",
382 __func__,
383 i,
384 outputShape[i],
385 outputSizeAlongConcatenatedDimension);
386 }
387 }
388 else
389 {
390 if (outputShape[i] != inputShapes[0][i])
391 {
392 return Fail("%s: Invalid output shape", __func__);
393 }
394 }
395 }
396
397 return true;
398}
399
// Tensors with fewer than 3 dimensions must be reshaped before concatenation handling.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
404
405template<typename OSlot>
406armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
407 armnn::TensorInfo reshapeInfo)
408{
409 armnn::ReshapeDescriptor reshapeDescriptor;
410 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
411
412 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
413 BOOST_ASSERT(reshapeLayer != nullptr);
414
415 // Attach the input layer to the reshape layer
416 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
417 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
418
419 return *reshapeLayer;
420}
421
// In place: routes every input through a permute layer applying 'mapping' (skipped
// entirely for the 4D identity permutation), replacing both the handles and their
// recorded shapes with the swizzled versions.
// Assumes inputShapes has at least as many entries as inputs — TODO confirm at call sites.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
442
// Decides whether a concat along 'concatDimension' needs a pre/post permutation so
// that the Compute Library's subtensor-based concat can be used. On return,
// 'concatDimension' may have been REWRITTEN to the dimension used after permuting,
// and 'permutationPair' holds the (pre, post) permutations to apply.
// @return true if a permutation is required, false if concat can proceed as-is.
bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t & concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    bool needPermute = false;
    BOOST_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        // Swap dims 1 and 2 so the concat happens along dimension 1 instead.
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        // Rotate so the concat happens along dimension 0; rotate back afterwards.
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    return needPermute;
}
467
468} // anonymous namespace
469
470namespace armnn_driver
471{
472
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
///         activation function or nullptr if an error occurred (e.g. unsupported activation).
///         Note that the end layer matches the input layer if no activation is required
///         (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
483
484} // namespace armnn_driver
485
486///
487/// Utility templates
488///
489
490namespace armnn_driver
491{
492
493using namespace android::nn;
494
// Looks up the operand feeding input 'inputIndex' of 'operation', or nullptr if the
// index is out of range. When failOnIndexOutOfBounds is false the out-of-range case
// is silent (used by callers probing for optional inputs).
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu match uint32_t/size_t; the previous "%i" for a size_t argument
            // was undefined behaviour in this printf-style log call.
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size());
    return &model.operands[operation.inputs[inputIndex]];
}
516
// Looks up the operand produced by output 'outputIndex' of 'operation', or nullptr
// (with a logged failure) if the index is out of range.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %u/%zu match uint32_t/size_t; the previous "%i" for a size_t argument
        // was undefined behaviour in this printf-style log call.
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
536
// Returns a read-only pointer to the constant data backing 'operand', or nullptr on
// failure. CONSTANT_COPY data lives inline in model.operandValues; CONSTANT_REFERENCE
// data lives in one of the memory pools held by 'data'. When 'optional' is true, a
// NO_VALUE operand yields nullptr without logging a failure.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A mandatory operand with NO_VALUE falls through to the failure path below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
583
// Reads the HAL type of input operand 'inputIndex' into 'type'.
// Returns false (logging the reason) if the operand cannot be found.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model,
         typename HalOperandType = typename HalPolicy::OperandType>
bool GetOperandType(const HalOperation& operation,
                    uint32_t inputIndex,
                    const HalModel& model,
                    HalOperandType& type)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    type = operand->type;
    return true;
}
604
// Converts a constant model operand into a ConstTensorPin.
// @param dimensionMappings   permutation applied to the tensor data (default: none)
// @param overrideTensorShape optional replacement for the operand's own shape
// @param optional            if true, a NO_VALUE operand is not an error: an invalid
//                            pin marked optional is returned instead
// Returns an invalid pin (logging the reason) on any other failure.
template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Only constant (or, when optional, absent) operands can back a ConstTensor.
    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
652
// Convenience wrapper: resolves input operand 'inputIndex' of 'operation' and converts
// it via ConvertOperandToConstTensorPin (same parameter semantics).
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}
679
680template<typename HalPolicy,
681 typename OutputType,
682 typename HalOperandType = typename HalPolicy::OperandType,
683 typename HalOperation = typename HalPolicy::Operation,
684 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100685bool GetInputScalar(const HalOperation& operation,
686 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100687 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100688 OutputType& outValue,
689 const HalModel& model,
690 const ConversionData& data)
691{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100692 using HalOperand = typename HalPolicy::Operand;
693
694 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100695 if (!operand)
696 {
697 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
698 }
699
700 if (operand->type != type)
701 {
702 return Fail("%s: unexpected operand type: %s (should be %s)",
703 __func__, toString(operand->type).c_str(), toString(type).c_str());
704 }
705
706 if (operand->location.length != sizeof(OutputType))
707 {
708 return Fail("%s: incorrect operand location length: %i (should be %i)",
709 __func__, operand->location.length, sizeof(OutputType));
710 }
711
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100712 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100713 if (!valueAddress)
714 {
715 return Fail("%s: failed to get address for operand", __func__);
716 }
717
718 outValue = *(static_cast<const OutputType*>(valueAddress));
719 return true;
720}
721
// Reads an INT32 scalar input operand into 'outValue'.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}
733
// Reads a FLOAT32 scalar input operand into 'outValue'.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}
745
// Reads an activation-function input operand (stored as either an INT32 scalar or a
// TENSOR_INT32) and converts it to ActivationFn. Returns false on type mismatch or read failure.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        // The message stringifies android::nn::OperandType values (via the using-directive),
        // which may differ textually from HalOperandType — acceptable for logging.
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}
774
// Reads an INT32-scalar activation-function input operand into 'outActivationFunction'.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
791
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    // NOTE(review): despite the comment, this passes OperandType::INT32 — identical to
    // GetInputActivationFunction — not TENSOR_INT32; confirm whether that is intended.
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}
809
810
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100811template<typename HalPolicy,
812 typename HalOperation = typename HalPolicy::Operation,
813 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100814bool GetOptionalInputActivation(const HalOperation& operation,
815 uint32_t inputIndex,
816 ActivationFn& activationFunction,
817 const HalModel& model,
818 const ConversionData& data)
819{
820 if (operation.inputs.size() <= inputIndex)
821 {
822 activationFunction = ActivationFn::kActivationNone;
823 }
824 else
825 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100826 if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100827 {
828 return Fail("%s: Operation has invalid inputs", __func__);
829 }
830 }
831 return true;
832}
833
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100834template<typename HalPolicy,
835 typename ConvolutionDescriptor,
836 typename HalOperation = typename HalPolicy::Operation,
837 typename HalModel = typename HalPolicy::Model>
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100838bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
839 uint32_t dilationXIndex,
840 ConvolutionDescriptor& descriptor,
841 const HalModel& model,
842 const ConversionData& data)
843{
844 bool success = true;
845 if (operation.inputs.size() >= dilationXIndex + 2)
846 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100847 success &= GetInputScalar<HalPolicy>(operation,
848 dilationXIndex,
849 HalPolicy::OperandType::INT32,
850 descriptor.m_DilationX,
851 model,
852 data);
853 success &= GetInputScalar<HalPolicy>(operation,
854 dilationXIndex + 1,
855 HalPolicy::OperandType::INT32,
856 descriptor.m_DilationY,
857 model,
858 data);
Aron Virginas-Tar07c7c9a2019-06-12 14:03:35 +0100859 }
860
861 return success;
862}
863
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100864template<typename HalPolicy,
865 typename HalOperand = typename HalPolicy::Operand,
866 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +0100867bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100868 std::vector<int32_t>& outValues,
869 const HalModel& model,
870 const ConversionData& data)
871{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100872 if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100873 {
874 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
875 }
876
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100877 const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100878 if (!startAddress)
879 {
880 return Fail("%s: failed to get operand address", __func__, operand.type);
881 }
882
883 // Check number of bytes is sensible
884 const uint32_t numBytes = operand.location.length;
885 if (numBytes % sizeof(int32_t) != 0)
886 {
887 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
888 __func__, numBytes, sizeof(int32_t));
889 }
890
891 outValues.resize(numBytes / sizeof(int32_t));
892 memcpy(outValues.data(), startAddress, numBytes);
893 return true;
894}
895
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100896template<typename HalPolicy,
897 typename HalOperation = typename HalPolicy::Operation,
898 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +0100899bool GetInputPaddingScheme(const HalOperation& operation,
900 uint32_t inputIndex,
901 PaddingScheme& outPaddingScheme,
902 const HalModel& model,
903 const ConversionData& data)
904{
905 int32_t paddingSchemeAsInt;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100906 if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100907 {
908 return Fail("%s: failed to get padding scheme input value", __func__);
909 }
910
911 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
912 return true;
913}
914
/// Resolves the inputIndex-th input of @p operation to a LayerInputHandle that
/// either wraps an existing ArmNN output slot (for model inputs/outputs and
/// temporaries) or a freshly added Constant layer (for constant operands).
/// On any failure an invalid (default-constructed) LayerInputHandle is returned
/// after logging via Fail().
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand         = typename HalPolicy::Operand;
    using HalOperandType     = typename HalPolicy::OperandType;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    // GetTensorInfoForOperand may throw UnsupportedOperand; the whole dispatch
    // is wrapped so that an unsupported type is reported, not propagated.
    try
    {
        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

        switch (operand->lifetime)
        {
            case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
            case HalOperandLifeTime::MODEL_INPUT:
            case HalOperandLifeTime::MODEL_OUTPUT:
            {
                // The tensor is either an operand internal to the model, or a model input.
                // It can be associated with an ArmNN output slot for an existing layer.

                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
                const uint32_t operandIndex = operation.inputs[inputIndex];
                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
                break;  // unreachable after return; kept for symmetry
            }
            case HalOperandLifeTime::CONSTANT_COPY:
            case HalOperandLifeTime::CONSTANT_REFERENCE:
            {
                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
                if (tensorPin.IsValid())
                {
                    // Verify at least one backend can host the Constant layer
                    // before adding it to the network.
                    if (!IsLayerSupportedForAnyBackend(__func__,
                                                       armnn::IsConstantSupported,
                                                       data.m_Backends,
                                                       tensorPin.GetConstTensor().GetInfo()))
                    {
                        return LayerInputHandle();
                    }

                    armnn::IConnectableLayer* constantLayer =
                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
                }
                else
                {
                    Fail("%s: invalid operand tensor", __func__);
                    return LayerInputHandle();
                }
                break;  // unreachable after return; kept for symmetry
            }
            default:
            {
                // Unsupported lifetime for an input tensor
                Fail("%s: unsupported lifetime for input tensor: %s",
                     __func__, toString(operand->lifetime).c_str());
                return LayerInputHandle();
            }
        }
    }
    catch (UnsupportedOperand<HalOperandType>& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        return LayerInputHandle();
    }
}
1002
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001003template<typename HalPolicy,
1004 typename HalOperation = typename HalPolicy::Operation,
1005 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001006bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1007 uint32_t operationOutputIndex,
1008 armnn::IConnectableLayer& layer,
1009 uint32_t layerOutputIndex,
1010 const HalModel& model,
1011 ConversionData& data)
1012{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001013 using HalOperand = typename HalPolicy::Operand;
1014
1015 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001016 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
1017 {
1018 return false;
1019 }
1020
1021 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
1022
1023 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
1024 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
1025
1026 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
1027
1028 return true;
1029}
1030
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001031template<typename HalPolicy,
1032 typename HalOperation = typename HalPolicy::Operation,
1033 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001034armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
1035 uint32_t inputIndex,
1036 const HalModel& model,
1037 ConversionData& data)
1038{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001039 using HalOperand = typename HalPolicy::Operand;
1040
1041 const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001042 if (!operand)
1043 {
1044 return armnn::DataLayout::NHWC;
1045 }
1046
1047 if (!IsBool(*operand))
1048 {
1049 return armnn::DataLayout::NHWC;
1050 }
1051
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001052 const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001053 if (!valueAddress)
1054 {
1055 return armnn::DataLayout::NHWC;
1056 }
1057
1058 if (*(static_cast<const bool*>(valueAddress)))
1059 {
1060 return armnn::DataLayout::NCHW;
1061 }
1062 else
1063 {
1064 return armnn::DataLayout::NHWC;
1065 }
1066}
1067
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001068template<typename HalPolicy,
1069 typename HalOperation = typename HalPolicy::Operation,
1070 typename HalModel = typename HalPolicy::Model>
Mike Kellyb5fdf382019-06-11 16:35:25 +01001071bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
1072 uint32_t outputIndex,
1073 armnn::IConnectableLayer& layer,
1074 const HalModel& model,
1075 ConversionData& data)
1076{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001077 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, outputIndex, layer, outputIndex, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +01001078}
1079
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001080template<typename HalPolicy,
1081 typename HalOperation = typename HalPolicy::Operation,
1082 typename HalModel = typename HalPolicy::Model>
arovir01b0717b52018-09-05 17:03:25 +01001083bool ConvertToActivation(const HalOperation& operation,
1084 const char* operationName,
1085 const armnn::ActivationDescriptor& activationDesc,
1086 const HalModel& model,
1087 ConversionData& data)
1088{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001089 using HalOperand = typename HalPolicy::Operand;
1090
1091 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001092 if (!input.IsValid())
1093 {
1094 return Fail("%s: Input 0 is invalid", operationName);
1095 }
1096
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001097 const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001098 if (!outputOperand)
1099 {
1100 return false;
1101 }
1102 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001103 if (!IsLayerSupportedForAnyBackend(__func__,
1104 armnn::IsActivationSupported,
1105 data.m_Backends,
1106 input.GetTensorInfo(),
1107 outInfo,
1108 activationDesc))
arovir01b0717b52018-09-05 17:03:25 +01001109 {
1110 return false;
1111 }
1112
1113 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
1114 BOOST_ASSERT(layer != nullptr);
1115 input.Connect(layer->GetInputSlot(0));
1116
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001117 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001118}
1119
/// Converts an NN API 2D pooling operation (AVERAGE/L2/MAX, selected by
/// @p poolType) into an ArmNN Pooling2d layer followed by any fused activation.
/// Supports both the implicit-padding signature (7 inputs: padding scheme,
/// strides, filter size, activation) and the explicit-padding signature
/// (10 inputs: 4 pad values, strides, filter size, activation).
/// Returns false (logging via Fail) on any invalid input or unsupported layer.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // The descriptor is fixed to NHWC here; no optional data-layout operand is
    // read by this converter.
    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // Shape indices [2]/[1] are width/height under the NHWC layout fixed above.
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        // Derive explicit pad values from the implicit padding scheme.
        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPooling2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // ProcessActivation may append an Activation layer; endLayer is whichever
    // layer produces the final output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1217
/// Converts an NN API CONV_2D operation into an ArmNN Convolution2d layer plus
/// any fused activation. Handles both the explicit-padding signature
/// (>= 10 inputs) and the implicit-padding signature (>= 7 inputs), each with
/// optional data-layout and dilation operands. Weights and bias must be
/// constant operands. Returns false (logging via Fail) on any error.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input/weight scales where needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() >= 10)
    {
        // Explicit padding: pad l/r/t/b, strides, activation; optional layout
        // at index 10 and optional dilation at indices 11/12.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
    }
    else if (operation.inputs.size() >= 7)
    {
        // Implicit padding: padding scheme, strides, activation; optional
        // layout at index 7 and optional dilation at indices 8/9.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // NOTE(review): these shape indices assume NHWC-ordered input and an
        // OHWI weight tensor; the padding is computed before the optional
        // layout operand is read below - confirm intended for NCHW models.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConvolution2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // ProcessActivation may append an Activation layer; endLayer is whichever
    // layer produces the final output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1334
/// Converts an NN API DEPTHWISE_CONV_2D operation into an ArmNN
/// DepthwiseConvolution2d layer plus any fused activation. The AndroidNN
/// [1, H, W, I*M] weight tensor is reshaped to [H, W, I, M] and permuted to
/// the [M, I, H, W] layout ArmNN expects. Handles explicit-padding
/// (>= 11 inputs) and implicit-padding (>= 8 inputs) signatures, each with
/// optional data-layout and dilation operands. Returns false (via Fail) on
/// any error.
template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel     = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand     = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Look ahead to find the optional DataLayout, if present.
    // It must be known up-front because the channel index used to reinterpret
    // the weights below depends on it.
    if (operation.inputs.size() >= 12)
    {
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 11, model, data);
    }
    else if (operation.inputs.size() >= 9)
    {
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 8, model, data);
    }

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M = depth multiplier, recovered as I*M divided by the input channel count)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input/weight scales where needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() >= 11)
    {
        // Explicit padding: pad l/r/t/b, strides, depth multiplier (index 9,
        // consumed by the weight reinterpretation above), activation at 10,
        // optional dilation at 12/13.
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() >= 8)
    {
        // Implicit padding: padding scheme, strides, activation at 7,
        // optional dilation at 9/10.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are [M, I, H, W] after the swizzle above, so W/H are at [3]/[2].
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // ProcessActivation may append an Activation layer; endLayer is whichever
    // layer produces the final output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}
1489
saoste01b8471482018-10-10 09:44:51 +01001490} // namespace armnn_driver