blob: 9552b7620a463f00c2996057be911216dea30e74 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5#pragma once
6
telsoa014fcda012018-03-09 14:13:49 +00007#include <armnn/Descriptors.hpp>
Aron Virginas-Tar5c3e9232018-11-16 11:00:48 +00008#include <armnn/Tensor.hpp>
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +01009#include <armnn/utility/Assert.hpp>
Matthew Sloyan5fc0fd62021-05-03 12:22:03 +010010#include <armnn/utility/NumericCast.hpp>
Colm Donelan0c479742021-12-10 12:43:54 +000011#include <armnn/backends/WorkloadData.hpp>
Mike Kelly4980e212023-08-04 13:35:41 +010012#include <armnnUtils/TensorUtils.hpp>
telsoa014fcda012018-03-09 14:13:49 +000013
Teresa Charlinec5f7d12021-10-22 17:15:00 +010014#include <arm_compute/runtime/FunctionDescriptors.h>
Nikhil Raj038f52b2023-07-31 10:06:32 +010015#include <arm_compute/function_info/FullyConnectedLayerInfo.h>
telsoa014fcda012018-03-09 14:13:49 +000016
Matthew Sloyan5fc0fd62021-05-03 12:22:03 +010017#if defined(ARMCOMPUTENEON_ENABLED)
18#include "neon/workloads/NeonReduceWorkload.hpp"
19#endif
20
21#if defined(ARMCOMPUTECL_ENABLED)
22#include "cl/workloads/ClReduceWorkload.hpp"
23#endif
24
telsoa014fcda012018-03-09 14:13:49 +000025namespace armnn
26{
27
28inline arm_compute::NormalizationLayerInfo
Matteo Martincigh539b44d2018-10-01 09:26:39 +010029CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo& tensorInfo,
30 armnn::DataLayout dataLayout)
telsoa014fcda012018-03-09 14:13:49 +000031{
Matteo Martincigh539b44d2018-10-01 09:26:39 +010032 unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
33 const unsigned int depth = tensorInfo.GetShape()[depthDimension];
telsoa014fcda012018-03-09 14:13:49 +000034
35 // At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version of
36 // L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose
37 // {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by carefully chosing the normalization
38 // parameters.
39 //
40 // Please refer to both the reference implementation of the normalization layer and the implementation of
41 // {CL|Neon}NormalizationLayer when checking the derivations for the parameter values below.
42
43 // Make sure normalization covers the entire depth range. ACL requires the normalization size to be odd.
44 // CL: This does not result in extra kernel threads not doing any work: See usage of the RADIUS parameter in
45 // ACL's normalization_layer_cross_map() CL function.
46 const uint32_t normSize = depth * 2u + 1u;
47
48 // See ACL's NormalizationLayerInfo::scale_coeff() definition.
49 // For the reference implementation, to make alpha_ become 1, we'd have to use alpha = normSize instead.
50 const float alpha = 1.0f;
51
telsoa01c577f2c2018-08-31 09:22:23 +010052 // Don't offset the reduction.
telsoa014fcda012018-03-09 14:13:49 +000053 const float kappa = 0.0f;
54
55 // pow(reduction, -0.5) = 1 / sqrt(reduction)
56 const float beta = 0.5f;
57
58 return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa, false);
59}
60
61inline arm_compute::ActivationLayerInfo::ActivationFunction
62ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction)
63{
64 using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction;
65
66 switch (armnnFunction)
67 {
68 case ActivationFunction::Linear: return AclActivationFunction::LINEAR;
telsoa01c577f2c2018-08-31 09:22:23 +010069 // Arm compute's 'logistic' function is non-parameterized, so it is exactly a sigmoid function.
telsoa014fcda012018-03-09 14:13:49 +000070 case ActivationFunction::Sigmoid: return AclActivationFunction::LOGISTIC;
71 case ActivationFunction::ReLu: return AclActivationFunction::RELU;
72 case ActivationFunction::BoundedReLu: return AclActivationFunction::LU_BOUNDED_RELU;
73 case ActivationFunction::SoftReLu: return AclActivationFunction::SOFT_RELU;
74 case ActivationFunction::LeakyReLu: return AclActivationFunction::LEAKY_RELU;
75 case ActivationFunction::Abs: return AclActivationFunction::ABS;
76 case ActivationFunction::Sqrt: return AclActivationFunction::SQRT;
77 case ActivationFunction::Square: return AclActivationFunction::SQUARE;
78 case ActivationFunction::TanH: return AclActivationFunction::TANH;
David Monahan3b3c3812020-02-25 09:03:29 +000079 case ActivationFunction::Elu: return AclActivationFunction::ELU;
Jan Eilersa83af7b2020-03-18 15:58:11 +000080 case ActivationFunction::HardSwish: return AclActivationFunction::HARD_SWISH;
telsoa014fcda012018-03-09 14:13:49 +000081 default: throw InvalidArgumentException("Unsupported activation function");
82 }
83}
84
85inline arm_compute::ActivationLayerInfo
86ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor& actDesc)
87{
88 return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function),
89 actDesc.m_A, actDesc.m_B);
90}
91
Mike Kelly07810fc2020-11-12 10:58:48 +000092inline arm_compute::ActivationLayerInfo
93ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor* activationDescPtr)
94{
95 if (activationDescPtr != nullptr)
96 {
97 return ConvertActivationDescriptorToAclActivationLayerInfo(static_cast<ActivationDescriptor>(
98 *activationDescPtr));
99 }
100 return arm_compute::ActivationLayerInfo();
101}
102
103inline arm_compute::ActivationLayerInfo
104ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor& queueDescriptor)
105{
106 const ActivationDescriptor* activationDescPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
107
108 if (activationDescPtr != nullptr)
109 {
110 return ConvertActivationDescriptorToAclActivationLayerInfo(static_cast<ActivationDescriptor>(
111 *activationDescPtr));
112 }
113 return arm_compute::ActivationLayerInfo();
114}
115
Cathal Corbettfd5bec42022-03-03 15:13:23 +0000116inline arm_compute::ActivationLayerInfo
117ConvertLstmActivationFuncToAclLayerInfo(uint32_t activationFunction)
118{
119 // For preparing the object for the class ActivationLayerInfo, we need to consider 5 situations.
120 switch (activationFunction)
121 {
122 case 0:
123 return arm_compute::ActivationLayerInfo(); // no activation, do nothing
124 case 1:
125 return arm_compute::ActivationLayerInfo(arm_compute::ActivationLayerInfo::ActivationFunction::RELU);
126 case 3:
127 return arm_compute::ActivationLayerInfo(
128 arm_compute::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.0);
129 case 4:
130 return arm_compute::ActivationLayerInfo(
131 arm_compute::ActivationLayerInfo::ActivationFunction::TANH, 1.0, 1.0);
132 case 6:
133 return arm_compute::ActivationLayerInfo(
134 arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC);
135 default:
136 throw armnn::Exception("Wrong Type of Activation Function!");
137 }
138}
139
Teresa Charlin2b030d92020-03-27 16:40:56 +0000140inline arm_compute::ComparisonOperation ConvertComparisonOperationToAcl(const ComparisonDescriptor& descriptor)
141{
142 switch (descriptor.m_Operation)
143 {
144 case ComparisonOperation::Greater: return arm_compute::ComparisonOperation::Greater;
145 case ComparisonOperation::GreaterOrEqual: return arm_compute::ComparisonOperation::GreaterEqual;
146 case ComparisonOperation::Less: return arm_compute::ComparisonOperation::Less;
147 case ComparisonOperation::LessOrEqual: return arm_compute::ComparisonOperation::LessEqual;
148 case ComparisonOperation::Equal: return arm_compute::ComparisonOperation::Equal;
149 case ComparisonOperation::NotEqual: return arm_compute::ComparisonOperation::NotEqual;
150 default: throw InvalidArgumentException("Unsupported comparison function");
151 }
152}
153
telsoa014fcda012018-03-09 14:13:49 +0000154inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
155{
156 using arm_compute::PoolingType;
157
158 switch (poolingAlgorithm)
159 {
160 case PoolingAlgorithm::Max: return PoolingType::MAX;
161 case PoolingAlgorithm::Average: return PoolingType::AVG;
162 case PoolingAlgorithm::L2: return PoolingType::L2;
163 default: throw InvalidArgumentException("Unsupported pooling algorithm");
164 }
165}
166
167inline arm_compute::DimensionRoundingType ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding
168 rounding)
169{
170 using arm_compute::DimensionRoundingType;
171
172 switch (rounding)
173 {
174 case OutputShapeRounding::Ceiling: return DimensionRoundingType::CEIL;
175 case OutputShapeRounding::Floor: return DimensionRoundingType::FLOOR;
176 default: throw InvalidArgumentException("Unsupported Output Shape Rounding type");
177 }
178}
179
180inline arm_compute::NormType
181ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType)
182{
183 using arm_compute::NormType;
184 switch (channelType)
185 {
186 case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP;
187 case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D;
188 default: throw InvalidArgumentException("Unsupported normalization algorithm channel type");
189 }
190}
191
telsoa01c577f2c2018-08-31 09:22:23 +0100192inline arm_compute::FullyConnectedLayerInfo
Mike Kelly07810fc2020-11-12 10:58:48 +0000193ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc,
194 const ActivationDescriptor* activationDesc)
telsoa01c577f2c2018-08-31 09:22:23 +0100195{
196 arm_compute::FullyConnectedLayerInfo fc_info;
197 fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
Mike Kelly07810fc2020-11-12 10:58:48 +0000198 fc_info.activation_info = ConvertActivationDescriptorToAclActivationLayerInfo(activationDesc);
199 return fc_info;
200}
201
202inline arm_compute::FullyConnectedLayerInfo
203ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc,
204 arm_compute::ActivationLayerInfo activationLayerInfo)
205{
206 arm_compute::FullyConnectedLayerInfo fc_info;
207 fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
208 fc_info.activation_info = activationLayerInfo;
telsoa01c577f2c2018-08-31 09:22:23 +0100209 return fc_info;
210}
211
Aron Virginas-Tarcc0cefb2019-07-02 17:25:47 +0100212inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
213{
214 switch (resizeMethod)
215 {
216 case ResizeMethod::Bilinear:
217 return arm_compute::InterpolationPolicy::BILINEAR;
218 case ResizeMethod::NearestNeighbor:
219 return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
220 default:
221 throw InvalidArgumentException("Unsupported resize method");
222 }
223}
224
Teresa Charlinc1f6b092020-05-11 16:10:38 +0100225template<typename T>
226inline T ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn::TensorInfo& tensor)
Narumol Prangnawarat65d30962019-03-14 11:55:03 +0000227{
David Monahan9b14bfc2020-06-30 15:57:56 +0100228 // Detect the Android default value of -1 and return the ACL default value of 0.
Colm Donelanc3c5fc22019-08-15 16:03:17 +0100229 if (softmaxDesc.m_Axis == -1)
230 {
David Monahan9b14bfc2020-06-30 15:57:56 +0100231 return 0;
Colm Donelanc3c5fc22019-08-15 16:03:17 +0100232 }
233
David Monahan9b14bfc2020-06-30 15:57:56 +0100234 unsigned int dim = tensor.GetNumDimensions();
Narumol Prangnawarat65d30962019-03-14 11:55:03 +0000235
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100236 ARMNN_ASSERT(dim != 0);
Narumol Prangnawarat65d30962019-03-14 11:55:03 +0000237
238 // Currently ArmNN support axis 1.
David Monahan9b14bfc2020-06-30 15:57:56 +0100239 auto aclAxis = (static_cast<T>(dim) - 1);
240 aclAxis = aclAxis > 0 ? aclAxis -1 : aclAxis;
241
242 return aclAxis;
Narumol Prangnawarat65d30962019-03-14 11:55:03 +0000243}
244
Narumol Prangnawarat15eb5832019-05-20 15:31:05 +0100245inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
246{
247 unsigned int numSplit = desc.GetNumViews();
248 unsigned int numDimensions = desc.GetNumDimensions();
249 std::set<unsigned int> splitAxis;
250
Mike Kelly4980e212023-08-04 13:35:41 +0100251 if (desc.HasAxis())
Narumol Prangnawarat15eb5832019-05-20 15:31:05 +0100252 {
Mike Kelly4980e212023-08-04 13:35:41 +0100253 splitAxis.insert(armnnUtils::GetUnsignedAxis(desc.GetNumDimensions(), desc.GetAxis()));
254 }
255 else
256 {
257 for (unsigned int i = 0; i < numSplit; ++i)
Narumol Prangnawarat15eb5832019-05-20 15:31:05 +0100258 {
Mike Kelly4980e212023-08-04 13:35:41 +0100259 for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
Narumol Prangnawarat15eb5832019-05-20 15:31:05 +0100260 {
Mike Kelly4980e212023-08-04 13:35:41 +0100261 if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
262 {
263 splitAxis.insert(dimIdx);
264 }
Narumol Prangnawarat15eb5832019-05-20 15:31:05 +0100265 }
266 }
267 }
268 return splitAxis;
269}
270
Teresa Charlin7ac3ca62020-07-28 15:17:12 +0100271/// Function to convert ArmNN axis (left to right) to ACL axis (right to left) ranging from [-rank, rank)
Teresa Charlinf540eb82020-04-10 19:24:55 +0100272inline int ComputeAclAxis(const int& armnnAxis, const armnn::TensorInfo& tensor)
273{
Teresa Charlin7ac3ca62020-07-28 15:17:12 +0100274 int rank = static_cast<int>(tensor.GetNumDimensions());
Teresa Charlinf540eb82020-04-10 19:24:55 +0100275
Teresa Charlin7ac3ca62020-07-28 15:17:12 +0100276 ARMNN_ASSERT(rank != 0);
277 ARMNN_ASSERT((-1 * rank) <= armnnAxis);
278 ARMNN_ASSERT(armnnAxis < rank);
Teresa Charlinf540eb82020-04-10 19:24:55 +0100279
280 int sign = (armnnAxis < 0) ? -1 : 1;
Teresa Charlin7ac3ca62020-07-28 15:17:12 +0100281 int aclAxis = sign * rank - 1 - armnnAxis;
Teresa Charlinf540eb82020-04-10 19:24:55 +0100282
283 return aclAxis;
284}
285
Teresa Charlin7ac3ca62020-07-28 15:17:12 +0100286/// Function to convert axis to its positive equivalent value.
287/// [-rank, rank) --> [0, rank)
288inline unsigned int ComputePositiveAxis(const int& axis, const armnn::TensorInfo& tensor)
289{
290 int rank = static_cast<int>(tensor.GetNumDimensions());
291
292 ARMNN_ASSERT(rank != 0);
293 ARMNN_ASSERT((-1 * rank) <= axis);
294 ARMNN_ASSERT(axis < rank);
295
296 int positiveAxis = (axis < 0) ? rank + axis : axis;
297 return static_cast<unsigned int>(positiveAxis);
298}
299
Teresa Charlinec5f7d12021-10-22 17:15:00 +0100300/// Utility function used to setup an arm_compute::Conv3dInfo object from convolution3d descriptor.
301inline arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dDescriptor descriptor,
302 bool isFastMathEnabled,
303 const ActivationDescriptor* activationDescriptor)
304{
305 const arm_compute::Size3D stride{descriptor.m_StrideX, descriptor.m_StrideY, descriptor.m_StrideZ};
306 const arm_compute::Padding3D padding{descriptor.m_PadLeft, descriptor.m_PadRight,
307 descriptor.m_PadTop, descriptor.m_PadBottom,
308 descriptor.m_PadFront, descriptor.m_PadBack};
309 const arm_compute::Size3D dilation{descriptor.m_DilationX, descriptor.m_DilationY, descriptor.m_DilationZ};
310
311 const arm_compute::ActivationLayerInfo activationInfo =
312 ConvertActivationDescriptorToAclActivationLayerInfo(activationDescriptor);
313 const auto roundType = arm_compute::DimensionRoundingType::FLOOR;
314
315 return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled};
316}
317
318inline arm_compute::Conv3dInfo ComputeConv3DInfo(const armnn::Convolution3dQueueDescriptor queueDescriptor,
319 bool isFastMathEnabled)
320{
321 auto descriptor = queueDescriptor.m_Parameters;
322 const arm_compute::Size3D stride{descriptor.m_StrideX, descriptor.m_StrideY, descriptor.m_StrideZ};
323 const arm_compute::Padding3D padding{descriptor.m_PadLeft, descriptor.m_PadRight,
324 descriptor.m_PadTop, descriptor.m_PadBottom,
325 descriptor.m_PadFront, descriptor.m_PadBack};
326 const arm_compute::Size3D dilation{descriptor.m_DilationX, descriptor.m_DilationY, descriptor.m_DilationZ};
327
328 const arm_compute::ActivationLayerInfo activationInfo =
329 ConvertAdditionalInfoToAclActivationLayerInfo(queueDescriptor);
330 const auto roundType = arm_compute::DimensionRoundingType::FLOOR;
331
332 return arm_compute::Conv3dInfo{stride, padding, activationInfo, dilation, roundType, isFastMathEnabled};
333}
334
Matthew Sloyan2e5d0b22021-10-21 14:05:31 +0100335inline arm_compute::PaddingMode ConvertPaddingModeToAcl(const PaddingMode& paddingMode)
336{
337 switch (paddingMode)
338 {
339 case PaddingMode::Constant: return arm_compute::PaddingMode::CONSTANT;
340 case PaddingMode::Reflect: return arm_compute::PaddingMode::REFLECT;
341 case PaddingMode::Symmetric: return arm_compute::PaddingMode::SYMMETRIC;
342 default: throw InvalidArgumentException("Unsupported Padding Mode");
343 }
344}
345
Sadik Armagana2747482021-02-09 10:28:54 +0000346inline arm_compute::ReductionOperation ConvertReductionOperationToAcl(const ReduceDescriptor& descriptor)
347{
348 switch (descriptor.m_ReduceOperation)
349 {
350 case ReduceOperation::Sum: return arm_compute::ReductionOperation::SUM;
351 case ReduceOperation::Mean: return arm_compute::ReductionOperation::MEAN_SUM;
352 case ReduceOperation::Max: return arm_compute::ReductionOperation::MAX;
353 case ReduceOperation::Min: return arm_compute::ReductionOperation::MIN;
Teresa Charlin4e3e8312021-08-05 12:34:37 +0100354 case ReduceOperation::Prod: return arm_compute::ReductionOperation::PROD;
355 default: throw InvalidArgumentException("Unsupported Reduction operation");
Sadik Armagana2747482021-02-09 10:28:54 +0000356 }
357}
358
Matthew Sloyan5fc0fd62021-05-03 12:22:03 +0100359/// Function to compute the output tensor shape based on the axes and if keepDims is set.
360inline const TensorInfo ComputeReductionTensorShape(const armnn::TensorInfo& input,
361 const std::vector<uint32_t>& vAxis,
362 const bool keepDims)
363{
364 auto reducedTensorInfo = input;
365 unsigned int rank = reducedTensorInfo.GetNumDimensions();
366 unsigned int outputRank = 0;
367 // Calculate output dimension
368 if (keepDims)
369 {
370 outputRank = rank;
371 }
372 else if (vAxis.empty())
373 {
374 outputRank = 1;
375 }
376 else if (vAxis.size() > reducedTensorInfo.GetNumDimensions())
377 {
378 throw LayerValidationException("ReduceLayer: Dimensions to reduce can not be bigger than input dimensions");
379 }
380 else
381 {
382 outputRank = reducedTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(vAxis.size());
383 if (outputRank == 0)
384 {
385 outputRank = 1;
386 }
387 }
388 std::vector<unsigned int> dimSizes(outputRank, 1);
389 if (!vAxis.empty())
390 {
391 // Skip the dimension that has been reduced unless keepDims is true.
392 unsigned int outputIndex = 0;
393 for (unsigned int i = 0; i < reducedTensorInfo.GetNumDimensions(); ++i)
394 {
395 if (std::find(vAxis.begin(), vAxis.end(), i) == vAxis.end())
396 {
397 dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(reducedTensorInfo.GetShape()[i]);
398 ++outputIndex;
399 }
400 else if (keepDims)
401 {
402 dimSizes[outputIndex] = 1;
403 ++outputIndex;
404 }
405 }
406 }
407 const TensorShape inferredShape = TensorShape(outputRank, dimSizes.data());
408 reducedTensorInfo.SetShape(inferredShape);
409 return reducedTensorInfo;
410}
411
/// Macro checking whether a reduce layer with multiple axes is supported on a backend.
/// It decomposes the multi-axis ReduceDescriptor `desc` into a chain of single-axis
/// reductions: each iteration builds the intermediate output shape via
/// ComputeReductionTensorShape, calls `func(input, output, descriptor)` for one axis,
/// and feeds that output forward as the next input. `status` ends up holding the first
/// failing result, or the last successful one if every step is supported. When
/// m_KeepDims is false each removed dimension shifts the remaining axes left, which is
/// what `recalulatedAxis` compensates for. (Comments cannot appear inside the macro
/// body because of the line continuations.)
#define IS_MULTI_AXES_REDUCE_SUPPORTED(func, input, desc, status) \
    armnn::TensorInfo inputTensorInfo = input; \
    unsigned int recalulatedAxis = 0; \
    std::vector<uint32_t> axes; \
    \
    for (unsigned int i = 0; i != desc.m_vAxis.size(); ++i) \
    { \
        axes.emplace_back(desc.m_vAxis[i]); \
    \
        const armnn::TensorInfo& reducedTensorInfo = \
            ComputeReductionTensorShape(input, axes, desc.m_KeepDims); \
    \
        std::vector<uint32_t> singleAxis(1, desc.m_vAxis[i] - recalulatedAxis); \
    \
        armnn::ReduceDescriptor newReduceDescriptor = desc; \
        newReduceDescriptor.m_vAxis.assign(singleAxis.begin(), singleAxis.end()); \
    \
        status = func(inputTensorInfo, reducedTensorInfo, newReduceDescriptor); \
        if (!status) \
        { \
            break; \
        } \
    \
        if (!desc.m_KeepDims) \
        { \
            recalulatedAxis++; \
        } \
    \
        inputTensorInfo = reducedTensorInfo; \
    }
443
Aron Virginas-Tar5c3e9232018-11-16 11:00:48 +0000444} // namespace armnn