//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/ArmNN.hpp>

#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
#include "armnn/src/armnnUtils/Permute.hpp"
#include "Utils.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <boost/assert.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/numeric/conversion/cast.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>

#include <log/log.h>
#include <vector>

namespace armnn_driver
{

///
/// Helper classes
///

struct ConversionData
{
    ConversionData(const std::vector<armnn::BackendId>& backends)
    : m_Backends(backends)
    , m_Network(nullptr, nullptr)
    {}

    const std::vector<armnn::BackendId> m_Backends;
    armnn::INetworkPtr m_Network;
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // Optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
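
// Illustrative usage (a sketch, mirroring how the converters below use this helper):
// logging and bailing out of a conversion function become a single statement, e.g.
//     return Fail("%s: unexpected operand type: %s", __func__, toString(type).c_str());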

// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
template<typename IsLayerSupportedFunc, typename ... Args>
bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
{
    std::vector<char> unsupportedReason(1024 + 1);
    bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size() - 1);
    if (isSupported)
    {
        return true;
    }
    else
    {
        std::string sUnsupportedReason(unsupportedReason.data());
        if (sUnsupportedReason.size() > 0)
        {
            ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
        }
        else
        {
            ALOGD("%s: not supported by armnn", funcName);
        }
        return false;
    }
}

template<typename IsLayerSupportedFunc, typename ... Args>
bool IsLayerSupportedForAnyBackend(const char* funcName,
                                   IsLayerSupportedFunc f,
                                   const std::vector<armnn::BackendId>& backends,
                                   Args&&... args)
{
    for (auto&& backend : backends)
    {
        if (IsLayerSupported(funcName, f, backend, std::forward<Args>(args)...))
        {
            return true;
        }
    }

    ALOGD("%s: not supported by any specified backend", funcName);
    return false;
}
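
// Illustrative call (a sketch of the pattern used later in this file, e.g. when adding
// a Constant layer in ConvertToLayerInputHandle):
//     IsLayerSupportedForAnyBackend(__func__,
//                                   armnn::IsConstantSupported,
//                                   data.m_Backends,
//                                   tensorPin.GetConstTensor().GetInfo());
// The first backend in data.m_Backends that reports support wins; otherwise false is returned.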

template<typename Operand>
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
{
    return type == V1_0::OperandType::TENSOR_FLOAT32 ||
           type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_0::OperandType::TENSOR_INT32;
}

#ifdef ARMNN_ANDROID_NN_V1_2

inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
{
    return type == V1_2::OperandType::BOOL ||
           type == V1_2::OperandType::TENSOR_FLOAT16 ||
           type == V1_2::OperandType::TENSOR_FLOAT32 ||
           type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
           type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
           type == V1_2::OperandType::TENSOR_INT32;
}

#endif

inline bool IsBool(V1_0::Operand)
{
    return false;
}

#ifdef ARMNN_ANDROID_NN_V1_2

inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}

#endif

void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
                     armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);
    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
    {
        // If the number of dimensions does not match then we need to add degenerate dimensions
        // to the "smaller" tensor using a reshape:
        //      Small  Big
        //        |     |
        //     Reshape  |
        //        \    /
        //         Add
        bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();

        LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
        const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();

        LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1;
        const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();

        const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
        std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
        unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
        for (unsigned i = sizeDifference; i < bigTensorDimsNumber; ++i)
        {
            reshapedDims[i] = smallTensorDims.GetShape()[i - sizeDifference];
        }
        armnn::TensorInfo reshapedInfo = smallTensorDims;
        reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
                                                  reshapedDims.data() });

        armnn::ReshapeDescriptor reshapeDesc;
        reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
        armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
        smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

        // Connect the outputs from the new reshape and the original input layer
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        bigTensorHandle.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
}
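
// Worked example (illustrative): broadcasting a [1, 2, 3, 4] tensor against a [3, 4] tensor.
// The smaller input is reshaped to [1, 1, 3, 4] (trailing dimensions copied, leading ones
// padded with 1), so both inputs of the element-wise layer have four dimensions and the
// backend can broadcast over the degenerate leading dimensions.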

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
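
// Worked example (illustrative, assuming the usual SAME-padding arithmetic of the NN API's
// calculateExplicitPadding): for input = 7, kernel = 3, stride = 2 with kPaddingSame, the
// output size is ceil(7 / 2) = 4, the needed input is (4 - 1) * 2 + 3 = 9, so the total
// padding is 9 - 7 = 2, split as outPadHead = 1 and outPadTail = 1.
// With kPaddingValid both come out as 0.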
265
Matthew Bentham912b3622019-05-03 15:49:14 +0100266Shape GetOperandShape(const V1_0::Operand& operand)
arovir01b0717b52018-09-05 17:03:25 +0100267{
268 Shape shape;
Matthew Bentham912b3622019-05-03 15:49:14 +0100269 shape.type = OperandType(operand.type);
arovir01b0717b52018-09-05 17:03:25 +0100270 shape.dimensions = operand.dimensions;
271 shape.scale = operand.scale;
272 shape.offset = operand.zeroPoint;
273 return shape;
274}
275
276// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
277// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
278// we accept some tolerance. We don't want to ArmNN itself to accept these inconsistencies as it is up to the user
279// (us, in this case) to ensure they match.
280void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
281 const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
282{
283 const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
284 if (biasInfo.GetQuantizationScale() != expectedBiasScale)
285 {
286 boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
287 if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
288 {
289 ALOGW("Bias quantization scale has been modified to match input*weights");
290 biasInfo.SetQuantizationScale(expectedBiasScale);
291 }
292 }
293}
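
// Numeric example (illustrative): with an input scale of 0.5f and a weight scale of 0.25f,
// the expected bias scale is 0.5f * 0.25f = 0.125f. A bias scale of 0.1251f is within the
// 1% tolerance and gets snapped to 0.125f; a bias scale of 0.2f is left untouched and will
// instead be rejected by ArmNN's own validation later on.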

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
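
// Illustrative effect (assuming ArmNN's convention that mappings[i] gives the destination
// position of source dimension i): applying NHWCToArmNN to an NHWC shape [1, 16, 16, 3]
// yields the NCHW shape [1, 3, 16, 16]; ArmNNToNHWC is its inverse.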

template<typename OSlot>
armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
                                          const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);

    BOOST_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}

armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}

// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}

// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape>& inputShapes,
                               const armnn::TensorShape& outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}
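
// Worked example (illustrative): concatenating shapes [1, 2, 4] and [1, 3, 4] along
// concatDim = 1 requires an output shape of [1, 5, 4]; every dimension other than the
// concatenation axis must match the first input exactly.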

bool RequiresReshape(armnn::TensorShape& inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

template<typename OSlot>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    BOOST_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // Add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // Replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t& concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutationPair)
{
    bool needPermute = false;
    BOOST_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation.
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    return needPermute;
}
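
// Illustrative trace: for 4-D inputs with concatDimension == 2, the inputs are permuted with
// SwapDim1And2 (so the old dimension 2 becomes dimension 1), the concatenation runs along
// dimension 1, and the result is permuted back with SwapDim1And2 (that permutation is its own
// inverse). For 3-D inputs with concatDimension == 1, RotateTensorLeft moves the axis to
// dimension 0 and RotateTensorRight undoes the rotation afterwards.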

} // anonymous namespace

namespace armnn_driver
{

/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires it.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetInputOperand(const HalOperation& operation,
                                  uint32_t inputIndex,
                                  const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
const HalOperand* GetOutputOperand(const HalOperation& operation,
                                   uint32_t outputIndex,
                                   const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const void* valueStart = nullptr;
    switch (operand.lifetime)
    {
        case HalOperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case HalOperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
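
// Illustrative summary of the lifetimes handled above: CONSTANT_COPY resolves to a pointer
// into model.operandValues at operand.location.offset, CONSTANT_REFERENCE resolves into one
// of the memory pools tracked in data.m_MemPools, and NO_VALUE yields nullptr (silently when
// optional == true, with a logged failure otherwise).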

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != HalOperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != HalOperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // Optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // Mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin<HalPolicy>(*operand,
                                                     model,
                                                     data,
                                                     dimensionMappings,
                                                     overrideTensorShape,
                                                     optional);
}

template<typename HalPolicy,
         typename OutputType,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    HalOperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    outValue = *(static_cast<const OutputType*>(valueAddress));
    return true;
}
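
// Illustrative usage (a sketch): a converter reading a convolution stride from input 7 would do
//     uint32_t strideX;
//     if (!GetInputScalar<HalPolicy>(operation, 7, HalPolicy::OperandType::INT32, strideX, model, data)) ...
// The GetInputInt32/GetInputFloat32 wrappers below cover the common scalar types.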

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::INT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalPolicy>(operation, inputIndex, HalPolicy::OperandType::FLOAT32, outValue, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalOperandType = typename HalPolicy::OperandType,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalPolicy>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalPolicy>(operation,
                                                     inputIndex,
                                                     HalPolicy::OperandType::INT32,
                                                     outActivationFunction,
                                                     model,
                                                     data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalPolicy>(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalPolicy,
         typename ConvolutionDescriptor,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetOptionalConvolutionDilationParams(const HalOperation& operation,
                                          uint32_t dilationXIndex,
                                          ConvolutionDescriptor& descriptor,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    bool success = true;
    if (operation.inputs.size() >= dilationXIndex + 2)
    {
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationX,
                                             model,
                                             data);
        success &= GetInputScalar<HalPolicy>(operation,
                                             dilationXIndex + 1,
                                             HalPolicy::OperandType::INT32,
                                             descriptor.m_DilationY,
                                             model,
                                             data);
    }

    return success;
}
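
// Illustrative note: ConvertConv2d below calls this with dilationXIndex = 11 for the
// explicit-padding form and 8 for the implicit-padding form, so e.g. a 10-input CONV_2D
// has no dilation operands and the descriptor keeps its default dilation.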

template<typename HalPolicy,
         typename HalOperand = typename HalPolicy::Operand,
         typename HalModel = typename HalPolicy::Model>
bool GetTensorInt32Values(const HalOperand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != HalPolicy::OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress<HalPolicy>(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__, operand.type);
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32<HalPolicy>(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandLifeTime = typename HalPolicy::OperandLifeTime;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

    switch (operand->lifetime)
    {
        case HalOperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
        case HalOperandLifeTime::MODEL_INPUT:
        case HalOperandLifeTime::MODEL_OUTPUT:
        {
            // The tensor is either an operand internal to the model, or a model input.
            // It can be associated with an ArmNN output slot for an existing layer.

            // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
            const uint32_t operandIndex = operation.inputs[inputIndex];
            return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            break;
        }
        case HalOperandLifeTime::CONSTANT_COPY:
        case HalOperandLifeTime::CONSTANT_REFERENCE:
        {
            // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
            ConstTensorPin tensorPin = ConvertOperandToConstTensorPin<HalPolicy>(*operand, model, data);
            if (tensorPin.IsValid())
            {
                if (!IsLayerSupportedForAnyBackend(__func__,
                                                   armnn::IsConstantSupported,
                                                   data.m_Backends,
                                                   tensorPin.GetConstTensor().GetInfo()))
                {
                    return LayerInputHandle();
                }

                armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                return LayerInputHandle(true, &outputSlot, operandTensorInfo);
            }
            else
            {
                Fail("%s: invalid operand tensor", __func__);
                return LayerInputHandle();
            }
            break;
        }
        default:
        {
            // Unsupported lifetime for an input tensor
            Fail("%s: unsupported lifetime for input tensor: %s",
                 __func__, toString(operand->lifetime).c_str());
            return LayerInputHandle();
        }
    }
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
    data.m_OutputSlotForOperand[operandIndex] = &outputSlot;

    outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));

    return true;
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
                                     uint32_t inputIndex,
                                     const HalModel& model,
                                     ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    const HalOperand* operand = GetInputOperand<HalPolicy>(operation, inputIndex, model);
    if (!operand)
    {
        return armnn::DataLayout::NHWC;
    }

    if (!IsBool(*operand))
    {
        return armnn::DataLayout::NHWC;
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress<HalPolicy>(*operand, model, data);
    if (!valueAddress)
    {
        return armnn::DataLayout::NHWC;
    }

    if (*(static_cast<const bool*>(valueAddress)))
    {
        return armnn::DataLayout::NCHW;
    }
    else
    {
        return armnn::DataLayout::NHWC;
    }
}
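
// Illustrative behaviour: only a BOOL operand whose value is true selects NCHW; a missing
// input, a non-BOOL operand, or a false value all fall back to NHWC, the Android NN default.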

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data)
{
    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, outputIndex, layer, outputIndex, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const HalOperand* outputOperand = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }
    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsActivationSupported,
                                       data.m_Backends,
                                       input.GetTensorInfo(),
                                       outInfo,
                                       activationDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // One input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 1, scheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // One input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalPolicy>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPooling2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() >= 10)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 10, model, data);
    }
    else if (operation.inputs.size() >= 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 7, model, data);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConvolution2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}

template<typename HalPolicy,
         typename HalOperation = typename HalPolicy::Operation,
         typename HalModel = typename HalPolicy::Model>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    using HalOperand = typename HalPolicy::Operand;
    using HalOperandType = typename HalPolicy::OperandType;

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Look ahead to find the optional DataLayout, if present
    if (operation.inputs.size() >= 12)
    {
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 11, model, data);
    }
    else if (operation.inputs.size() >= 9)
    {
        desc.m_DataLayout = OptionalDataLayout<HalPolicy>(operation, 8, model, data);
    }

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<HalPolicy>(operation,
                                                         1,
                                                         model,
                                                         data,
                                                         HWIMToMIHW,
                                                         &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() >= 11)
    {
        if (!GetInputScalar<HalPolicy>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() >= 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<HalPolicy>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
}

} // namespace armnn_driver