//
// Copyright © 2017-2024 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/Descriptors.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <armnn/backends/WorkloadData.hpp>
#include <armnnUtils/TensorUtils.hpp>

#include <arm_compute/runtime/FunctionDescriptors.h>
#include <arm_compute/function_info/FullyConnectedLayerInfo.h>

#if defined(ARMCOMPUTENEON_ENABLED)
#include "neon/workloads/NeonReduceWorkload.hpp"
#endif

#if defined(ARMCOMPUTECL_ENABLED)
#include "cl/workloads/ClReduceWorkload.hpp"
#endif

namespace armnn
{

inline arm_compute::NormalizationLayerInfo
CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout)
{
    unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
    const unsigned int depth = tensorInfo.GetShape()[depthDimension];

    // At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version of
    // L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose
    // {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by carefully choosing the normalization
    // parameters.
    //
    // Please refer to both the reference implementation of the normalization layer and the implementation of
    // {CL|Neon}NormalizationLayer when checking the derivations for the parameter values below.

    // Make sure normalization covers the entire depth range. ACL requires the normalization size to be odd.
    // CL: This does not result in extra kernel threads not doing any work: see the usage of the RADIUS parameter in
    // ACL's normalization_layer_cross_map() CL function.
    const uint32_t normSize = depth * 2u + 1u;

    // See ACL's NormalizationLayerInfo::scale_coeff() definition.
    // For the reference implementation, to make alpha_ become 1, we'd have to use alpha = normSize instead.
    const float alpha = 1.0f;

    // Don't offset the reduction.
    const float kappa = 0.0f;

    // pow(reduction, -0.5) = 1 / sqrt(reduction)
    const float beta = 0.5f;

    return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa, false);
}
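
// With kappa = 0 and beta = 0.5, the CROSS_MAP normalization above evaluates, per element,
// something of the form (a sketch of the reference formula, not a statement of ACL internals):
//
//     out = in / pow(kappa + scale * sum_over_window(in^2), beta)
//         = in / sqrt(scale * sum_over_window(in^2))
//
// i.e. a depthwise L2 normalization up to the constant 'scale' factor that ACL derives from
// alpha and normSize (see NormalizationLayerInfo::scale_coeff()).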

inline arm_compute::ActivationLayerInfo::ActivationFunction
ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction)
{
    using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction;

    switch (armnnFunction)
    {
        case ActivationFunction::Linear:      return AclActivationFunction::LINEAR;
        // Arm Compute's 'logistic' function is non-parameterized, so it is exactly a sigmoid function.
        case ActivationFunction::Sigmoid:     return AclActivationFunction::LOGISTIC;
        case ActivationFunction::ReLu:        return AclActivationFunction::RELU;
        case ActivationFunction::BoundedReLu: return AclActivationFunction::LU_BOUNDED_RELU;
        case ActivationFunction::SoftReLu:    return AclActivationFunction::SOFT_RELU;
        case ActivationFunction::LeakyReLu:   return AclActivationFunction::LEAKY_RELU;
        case ActivationFunction::Abs:         return AclActivationFunction::ABS;
        case ActivationFunction::Sqrt:        return AclActivationFunction::SQRT;
        case ActivationFunction::Square:      return AclActivationFunction::SQUARE;
        case ActivationFunction::TanH:        return AclActivationFunction::TANH;
        case ActivationFunction::Elu:         return AclActivationFunction::ELU;
        case ActivationFunction::HardSwish:   return AclActivationFunction::HARD_SWISH;
        case ActivationFunction::Gelu:        return AclActivationFunction::GELU;
        default:                              throw InvalidArgumentException("Unsupported activation function");
    }
}

inline arm_compute::ActivationLayerInfo
ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor& actDesc)
{
    return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function),
                                            actDesc.m_A, actDesc.m_B);
}
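
// Hedged usage sketch (field values are illustrative): converting an ArmNN bounded ReLU
// descriptor into the equivalent ACL activation info.
//
//     armnn::ActivationDescriptor desc;
//     desc.m_Function = armnn::ActivationFunction::BoundedReLu;
//     desc.m_A = 6.0f; // upper bound
//     desc.m_B = 0.0f; // lower bound
//     arm_compute::ActivationLayerInfo aclInfo = ConvertActivationDescriptorToAclActivationLayerInfo(desc);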

inline arm_compute::ActivationLayerInfo
ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor* activationDescPtr)
{
    if (activationDescPtr != nullptr)
    {
        return ConvertActivationDescriptorToAclActivationLayerInfo(*activationDescPtr);
    }
    return arm_compute::ActivationLayerInfo();
}

inline arm_compute::ActivationLayerInfo
ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor& queueDescriptor)
{
    const ActivationDescriptor* activationDescPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
    return ConvertActivationDescriptorToAclActivationLayerInfo(activationDescPtr);
}

inline arm_compute::ActivationLayerInfo
ConvertLstmActivationFuncToAclLayerInfo(uint32_t activationFunction)
{
    // The activation function is passed as an integer code following the convention used by the
    // Android NNAPI / TfLite LSTM operators: 0: none, 1: ReLU, 3: ReLU6, 4: Tanh, 6: Sigmoid.
    switch (activationFunction)
    {
        case 0:
            return arm_compute::ActivationLayerInfo(); // No activation, do nothing.
        case 1:
            return arm_compute::ActivationLayerInfo(arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
        case 3:
            return arm_compute::ActivationLayerInfo(
                arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
        case 4:
            return arm_compute::ActivationLayerInfo(
                arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
        case 6:
            return arm_compute::ActivationLayerInfo(
                arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
        default:
            throw armnn::Exception("Unsupported LSTM activation function");
    }
}

inline arm_compute::ComparisonOperation ConvertComparisonOperationToAcl(const ComparisonDescriptor& descriptor)
{
    switch (descriptor.m_Operation)
    {
        case ComparisonOperation::Greater:        return arm_compute::ComparisonOperation::Greater;
        case ComparisonOperation::GreaterOrEqual: return arm_compute::ComparisonOperation::GreaterEqual;
        case ComparisonOperation::Less:           return arm_compute::ComparisonOperation::Less;
        case ComparisonOperation::LessOrEqual:    return arm_compute::ComparisonOperation::LessEqual;
        case ComparisonOperation::Equal:          return arm_compute::ComparisonOperation::Equal;
        case ComparisonOperation::NotEqual:       return arm_compute::ComparisonOperation::NotEqual;
        default:                                  throw InvalidArgumentException("Unsupported comparison function");
    }
}

inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
{
    using arm_compute::PoolingType;

    switch (poolingAlgorithm)
    {
        case PoolingAlgorithm::Max:     return PoolingType::MAX;
        case PoolingAlgorithm::Average: return PoolingType::AVG;
        case PoolingAlgorithm::L2:      return PoolingType::L2;
        default:                        throw InvalidArgumentException("Unsupported pooling algorithm");
    }
}

inline arm_compute::DimensionRoundingType
ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding rounding)
{
    using arm_compute::DimensionRoundingType;

    switch (rounding)
    {
        case OutputShapeRounding::Ceiling: return DimensionRoundingType::CEIL;
        case OutputShapeRounding::Floor:   return DimensionRoundingType::FLOOR;
        default:                           throw InvalidArgumentException("Unsupported output shape rounding type");
    }
}

inline arm_compute::NormType
ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType)
{
    using arm_compute::NormType;
    switch (channelType)
    {
        case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP;
        case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D;
        default: throw InvalidArgumentException("Unsupported normalization algorithm channel type");
    }
}

inline arm_compute::FullyConnectedLayerInfo
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc,
                                                            const ActivationDescriptor* activationDesc)
{
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
    fc_info.activation_info = ConvertActivationDescriptorToAclActivationLayerInfo(activationDesc);
    return fc_info;
}

inline arm_compute::FullyConnectedLayerInfo
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc,
                                                            arm_compute::ActivationLayerInfo activationLayerInfo)
{
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
    fc_info.activation_info = activationLayerInfo;
    return fc_info;
}

inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
{
    switch (resizeMethod)
    {
        case ResizeMethod::Bilinear:
            return arm_compute::InterpolationPolicy::BILINEAR;
        case ResizeMethod::NearestNeighbor:
            return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
        default:
            throw InvalidArgumentException("Unsupported resize method");
    }
}

template<typename T>
inline T ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn::TensorInfo& tensor)
{
    // Detect the Android default value of -1 and return the ACL default value of 0.
    if (softmaxDesc.m_Axis == -1)
    {
        return 0;
    }

    unsigned int dim = tensor.GetNumDimensions();
    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(dim != 0, "The number of dimensions in this tensor cannot be zero.");

    // Currently ArmNN supports only axis 1.
    auto aclAxis = (static_cast<T>(dim) - 1);
    aclAxis = aclAxis > 0 ? aclAxis - 1 : aclAxis;

    return aclAxis;
}
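
// Worked example of the mapping above: ACL counts dimensions right to left, so for a rank-4
// tensor the (only supported) ArmNN axis 1 becomes aclAxis = dim - 1 = 3, which, being greater
// than zero, is then decremented to 2. For a rank-1 tensor, aclAxis stays at 0.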

inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
{
    unsigned int numSplit = desc.GetNumViews();
    unsigned int numDimensions = desc.GetNumDimensions();
    std::set<unsigned int> splitAxis;

    if (desc.HasAxis())
    {
        splitAxis.insert(armnnUtils::GetUnsignedAxis(desc.GetNumDimensions(), desc.GetAxis()));
    }
    else
    {
        for (unsigned int i = 0; i < numSplit; ++i)
        {
            for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
            {
                if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
                {
                    splitAxis.insert(dimIdx);
                }
            }
        }
    }
    return splitAxis;
}
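
// When no explicit axis is set, the loop above infers the split axes as every dimension in which
// some view's size differs from the input's size. Worked example (illustrative shapes): splitting
// an input of shape [2, 4, 6] into two views of shape [2, 2, 6] leaves dimensions 0 and 2
// untouched, so only dimension 1 differs and the result is { 1 }.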

/// Function to convert ArmNN axis (left to right) to ACL axis (right to left) ranging from [-rank, rank).
inline int ComputeAclAxis(const int& armnnAxis, const armnn::TensorInfo& tensor)
{
    int rank = static_cast<int>(tensor.GetNumDimensions());

    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(rank != 0, "The number of dimensions in this tensor cannot be zero.");
    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(armnnAxis < rank, "Incompatible value of armnnAxis.");
    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE((-1 * rank) <= armnnAxis, "Incompatible value of armnnAxis.");

    int sign = (armnnAxis < 0) ? -1 : 1;
    int aclAxis = sign * rank - 1 - armnnAxis;

    return aclAxis;
}
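
// Worked examples for the formula above, with rank = 4:
//     armnnAxis =  1 (sign =  1):  aclAxis =  4 - 1 - 1    =  2
//     armnnAxis =  3 (sign =  1):  aclAxis =  4 - 1 - 3    =  0
//     armnnAxis = -1 (sign = -1):  aclAxis = -4 - 1 - (-1) = -4
// For a rank-4 tensor, ACL axes 0 and -4 both denote the innermost dimension, so positive and
// negative ArmNN axes remain consistent after conversion.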

/// Utility function used to set up an arm_compute::Conv3dInfo object from a Convolution3d descriptor.
inline arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dDescriptor descriptor,
                                                 bool isFastMathEnabled,
                                                 const ActivationDescriptor* activationDescriptor)
{
    const arm_compute::Size3D    stride{descriptor.m_StrideX, descriptor.m_StrideY, descriptor.m_StrideZ};
    const arm_compute::Padding3D padding{descriptor.m_PadLeft, descriptor.m_PadRight,
                                         descriptor.m_PadTop, descriptor.m_PadBottom,
                                         descriptor.m_PadFront, descriptor.m_PadBack};
    const arm_compute::Size3D    dilation{descriptor.m_DilationX, descriptor.m_DilationY, descriptor.m_DilationZ};

    const arm_compute::ActivationLayerInfo activationInfo =
        ConvertActivationDescriptorToAclActivationLayerInfo(activationDescriptor);
    const auto roundType = arm_compute::DimensionRoundingType::FLOOR;

    return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled};
}
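
// Hedged usage sketch (field values are illustrative): building the ACL info for a unit-stride,
// undilated 3D convolution with fast math enabled and no fused activation.
//
//     armnn::Convolution3dDescriptor convDesc;
//     convDesc.m_StrideX   = convDesc.m_StrideY   = convDesc.m_StrideZ   = 1;
//     convDesc.m_DilationX = convDesc.m_DilationY = convDesc.m_DilationZ = 1;
//     const arm_compute::Conv3dInfo info = ComputeConv3DInfo(convDesc, /*isFastMathEnabled=*/true, nullptr);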

inline arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dQueueDescriptor queueDescriptor,
                                                 bool isFastMathEnabled)
{
    auto descriptor = queueDescriptor.m_Parameters;
    const arm_compute::Size3D    stride{descriptor.m_StrideX, descriptor.m_StrideY, descriptor.m_StrideZ};
    const arm_compute::Padding3D padding{descriptor.m_PadLeft, descriptor.m_PadRight,
                                         descriptor.m_PadTop, descriptor.m_PadBottom,
                                         descriptor.m_PadFront, descriptor.m_PadBack};
    const arm_compute::Size3D    dilation{descriptor.m_DilationX, descriptor.m_DilationY, descriptor.m_DilationZ};

    const arm_compute::ActivationLayerInfo activationInfo =
        ConvertAdditionalInfoToAclActivationLayerInfo(queueDescriptor);
    const auto roundType = arm_compute::DimensionRoundingType::FLOOR;

    return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled};
}

inline arm_compute::PaddingMode ConvertPaddingModeToAcl(const PaddingMode& paddingMode)
{
    switch (paddingMode)
    {
        case PaddingMode::Constant:  return arm_compute::PaddingMode::CONSTANT;
        case PaddingMode::Reflect:   return arm_compute::PaddingMode::REFLECT;
        case PaddingMode::Symmetric: return arm_compute::PaddingMode::SYMMETRIC;
        default:                     throw InvalidArgumentException("Unsupported padding mode");
    }
}

inline arm_compute::ReductionOperation ConvertReductionOperationToAcl(const ReduceDescriptor& descriptor)
{
    switch (descriptor.m_ReduceOperation)
    {
        case ReduceOperation::Sum:  return arm_compute::ReductionOperation::SUM;
        case ReduceOperation::Mean: return arm_compute::ReductionOperation::MEAN_SUM;
        case ReduceOperation::Max:  return arm_compute::ReductionOperation::MAX;
        case ReduceOperation::Min:  return arm_compute::ReductionOperation::MIN;
        case ReduceOperation::Prod: return arm_compute::ReductionOperation::PROD;
        default:                    throw InvalidArgumentException("Unsupported reduction operation");
    }
}

/// Function to compute the output tensor shape based on the axes and whether keepDims is set.
inline const TensorInfo ComputeReductionTensorShape(const armnn::TensorInfo& input,
                                                    const std::vector<uint32_t>& vAxis,
                                                    const bool keepDims)
{
    auto reducedTensorInfo = input;
    unsigned int rank = reducedTensorInfo.GetNumDimensions();
    unsigned int outputRank = 0;
    // Calculate the output rank.
    if (keepDims)
    {
        outputRank = rank;
    }
    else if (vAxis.empty())
    {
        outputRank = 1;
    }
    else if (vAxis.size() > reducedTensorInfo.GetNumDimensions())
    {
        throw LayerValidationException("ReduceLayer: Dimensions to reduce cannot be bigger than input dimensions");
    }
    else
    {
        outputRank = reducedTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(vAxis.size());
        if (outputRank == 0)
        {
            outputRank = 1;
        }
    }
    std::vector<unsigned int> dimSizes(outputRank, 1);
    if (!vAxis.empty())
    {
        // Skip the dimensions that have been reduced unless keepDims is true.
        unsigned int outputIndex = 0;
        for (unsigned int i = 0; i < reducedTensorInfo.GetNumDimensions(); ++i)
        {
            if (std::find(vAxis.begin(), vAxis.end(), i) == vAxis.end())
            {
                dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(reducedTensorInfo.GetShape()[i]);
                ++outputIndex;
            }
            else if (keepDims)
            {
                dimSizes[outputIndex] = 1;
                ++outputIndex;
            }
        }
    }
    const TensorShape inferredShape = TensorShape(outputRank, dimSizes.data());
    reducedTensorInfo.SetShape(inferredShape);
    return reducedTensorInfo;
}
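
// Worked examples for the shape computation above, for an input of shape [2, 3, 4]:
//     vAxis = { 1 }, keepDims = false  ->  output shape [2, 4]
//     vAxis = { 1 }, keepDims = true   ->  output shape [2, 1, 4]
//     vAxis = { },   keepDims = false  ->  output shape [1] (reduce over all dimensions)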

/// Macro to check whether a reduce layer with multiple axes is supported on each backend, by
/// validating the reduction one axis at a time.
#define IS_MULTI_AXES_REDUCE_SUPPORTED(func, input, desc, status)                  \
    armnn::TensorInfo inputTensorInfo = input;                                     \
    unsigned int recalculatedAxis = 0;                                             \
    std::vector<uint32_t> axes;                                                    \
                                                                                   \
    for (unsigned int i = 0; i != desc.m_vAxis.size(); ++i)                        \
    {                                                                              \
        axes.emplace_back(desc.m_vAxis[i]);                                        \
                                                                                   \
        const armnn::TensorInfo& reducedTensorInfo =                               \
            ComputeReductionTensorShape(input, axes, desc.m_KeepDims);             \
                                                                                   \
        std::vector<uint32_t> singleAxis(1, desc.m_vAxis[i] - recalculatedAxis);   \
                                                                                   \
        armnn::ReduceDescriptor newReduceDescriptor = desc;                        \
        newReduceDescriptor.m_vAxis.assign(singleAxis.begin(), singleAxis.end());  \
                                                                                   \
        status = func(inputTensorInfo, reducedTensorInfo, newReduceDescriptor);    \
        if (!status)                                                               \
        {                                                                          \
            break;                                                                 \
        }                                                                          \
                                                                                   \
        if (!desc.m_KeepDims)                                                      \
        {                                                                          \
            recalculatedAxis++;                                                    \
        }                                                                          \
                                                                                   \
        inputTensorInfo = reducedTensorInfo;                                       \
    }
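
// Hedged usage sketch (illustrative only; 'ValidateSingleAxisReduce' is a hypothetical callable
// with the shape bool(const TensorInfo&, const TensorInfo&, const ReduceDescriptor&)):
//
//     bool supported = false;
//     IS_MULTI_AXES_REDUCE_SUPPORTED(ValidateSingleAxisReduce, inputInfo, reduceDescriptor, supported);
//     if (!supported) { /* reject or fall back */ }
//
// Note that the macro declares locals (inputTensorInfo, recalculatedAxis, axes) in the enclosing
// scope, so it can only be expanded once per block.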

} // namespace armnn