//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once

#include <armnn/Descriptors.hpp>
#include <armnn/Tensor.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/NumericCast.hpp>
#include <backendsCommon/WorkloadData.hpp>

#include <arm_compute/core/Types.h>

#if defined(ARMCOMPUTENEON_ENABLED)
#include "neon/workloads/NeonReduceWorkload.hpp"
#endif

#if defined(ARMCOMPUTECL_ENABLED)
#include "cl/workloads/ClReduceWorkload.hpp"
#endif

namespace armnn
{

inline arm_compute::NormalizationLayerInfo
CreateAclNormalizationLayerInfoForL2Normalization(const armnn::TensorInfo& tensorInfo,
                                                  armnn::DataLayout dataLayout)
{
    unsigned int depthDimension = dataLayout == armnn::DataLayout::NCHW ? 1 : 3;
    const unsigned int depth = tensorInfo.GetShape()[depthDimension];

    // At the time of writing, {CL|Neon}L2Normalization performs the reduction only along dimension 0. This version of
    // L2 Normalization always performs the reduction along the depth axis, though. Thus, we repurpose
    // {CL|Neon}NormalizationLayers to act as depthwise L2 normalizations by carefully choosing the normalization
    // parameters.
    //
    // Please refer to both the reference implementation of the normalization layer and the implementation of
    // {CL|Neon}NormalizationLayer when checking the derivations for the parameter values below.

    // Make sure normalization covers the entire depth range. ACL requires the normalization size to be odd.
    // CL: This does not result in extra kernel threads not doing any work: see the usage of the RADIUS parameter in
    // ACL's normalization_layer_cross_map() CL function.
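    // For example, a depth of 32 gives normSize = 65, an odd window wider than the depth dimension, so the
    // reduction always spans every channel regardless of the centre position.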
    const uint32_t normSize = depth * 2u + 1u;

    // See ACL's NormalizationLayerInfo::scale_coeff() definition.
    // For the reference implementation, to make alpha_ become 1, we'd have to use alpha = normSize instead.
    const float alpha = 1.0f;

    // Don't offset the reduction.
    const float kappa = 0.0f;

    // pow(reduction, -0.5) = 1 / sqrt(reduction)
    const float beta = 0.5f;

    return arm_compute::NormalizationLayerInfo(arm_compute::NormType::CROSS_MAP, normSize, alpha, beta, kappa, false);
}

inline arm_compute::ActivationLayerInfo::ActivationFunction
ConvertActivationFunctionToAclActivationFunction(ActivationFunction armnnFunction)
{
    using AclActivationFunction = arm_compute::ActivationLayerInfo::ActivationFunction;

    switch (armnnFunction)
    {
        case ActivationFunction::Linear: return AclActivationFunction::LINEAR;
        // Arm compute's 'logistic' function is non-parameterized, so it is exactly a sigmoid function.
        case ActivationFunction::Sigmoid: return AclActivationFunction::LOGISTIC;
        case ActivationFunction::ReLu: return AclActivationFunction::RELU;
        case ActivationFunction::BoundedReLu: return AclActivationFunction::LU_BOUNDED_RELU;
        case ActivationFunction::SoftReLu: return AclActivationFunction::SOFT_RELU;
        case ActivationFunction::LeakyReLu: return AclActivationFunction::LEAKY_RELU;
        case ActivationFunction::Abs: return AclActivationFunction::ABS;
        case ActivationFunction::Sqrt: return AclActivationFunction::SQRT;
        case ActivationFunction::Square: return AclActivationFunction::SQUARE;
        case ActivationFunction::TanH: return AclActivationFunction::TANH;
        case ActivationFunction::Elu: return AclActivationFunction::ELU;
        case ActivationFunction::HardSwish: return AclActivationFunction::HARD_SWISH;
        default: throw InvalidArgumentException("Unsupported activation function");
    }
}

inline arm_compute::ActivationLayerInfo
ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor& actDesc)
{
    return arm_compute::ActivationLayerInfo(ConvertActivationFunctionToAclActivationFunction(actDesc.m_Function),
                                            actDesc.m_A, actDesc.m_B);
}
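// For example, an ActivationDescriptor with m_Function == BoundedReLu, m_A == 6.0f and m_B == 0.0f becomes
// ActivationLayerInfo(LU_BOUNDED_RELU, 6.0f, 0.0f), i.e. a ReLU6.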

inline arm_compute::ActivationLayerInfo
ConvertActivationDescriptorToAclActivationLayerInfo(const ActivationDescriptor* activationDescPtr)
{
    if (activationDescPtr != nullptr)
    {
        return ConvertActivationDescriptorToAclActivationLayerInfo(static_cast<ActivationDescriptor>(
                *activationDescPtr));
    }
    return arm_compute::ActivationLayerInfo();
}

inline arm_compute::ActivationLayerInfo
ConvertAdditionalInfoToAclActivationLayerInfo(const QueueDescriptor& queueDescriptor)
{
    const ActivationDescriptor* activationDescPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();

    if (activationDescPtr != nullptr)
    {
        return ConvertActivationDescriptorToAclActivationLayerInfo(static_cast<ActivationDescriptor>(
                *activationDescPtr));
    }
    return arm_compute::ActivationLayerInfo();
}

inline arm_compute::ComparisonOperation ConvertComparisonOperationToAcl(const ComparisonDescriptor& descriptor)
{
    switch (descriptor.m_Operation)
    {
        case ComparisonOperation::Greater: return arm_compute::ComparisonOperation::Greater;
        case ComparisonOperation::GreaterOrEqual: return arm_compute::ComparisonOperation::GreaterEqual;
        case ComparisonOperation::Less: return arm_compute::ComparisonOperation::Less;
        case ComparisonOperation::LessOrEqual: return arm_compute::ComparisonOperation::LessEqual;
        case ComparisonOperation::Equal: return arm_compute::ComparisonOperation::Equal;
        case ComparisonOperation::NotEqual: return arm_compute::ComparisonOperation::NotEqual;
        default: throw InvalidArgumentException("Unsupported comparison function");
    }
}

inline arm_compute::PoolingType ConvertPoolingAlgorithmToAclPoolingType(PoolingAlgorithm poolingAlgorithm)
{
    using arm_compute::PoolingType;

    switch (poolingAlgorithm)
    {
        case PoolingAlgorithm::Max: return PoolingType::MAX;
        case PoolingAlgorithm::Average: return PoolingType::AVG;
        case PoolingAlgorithm::L2: return PoolingType::L2;
        default: throw InvalidArgumentException("Unsupported pooling algorithm");
    }
}

inline arm_compute::DimensionRoundingType ConvertOutputShapeRoundingToAclDimensionRoundingType(OutputShapeRounding
                                                                                               rounding)
{
    using arm_compute::DimensionRoundingType;

    switch (rounding)
    {
        case OutputShapeRounding::Ceiling: return DimensionRoundingType::CEIL;
        case OutputShapeRounding::Floor: return DimensionRoundingType::FLOOR;
        default: throw InvalidArgumentException("Unsupported Output Shape Rounding type");
    }
}

inline arm_compute::NormType
ConvertNormalizationAlgorithmChannelToAclNormType(NormalizationAlgorithmChannel channelType)
{
    using arm_compute::NormType;
    switch (channelType)
    {
        case NormalizationAlgorithmChannel::Across: return NormType::CROSS_MAP;
        case NormalizationAlgorithmChannel::Within: return NormType::IN_MAP_2D;
        default: throw InvalidArgumentException("Unsupported normalization algorithm channel type");
    }
}

inline arm_compute::FullyConnectedLayerInfo
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc,
                                                            const ActivationDescriptor* activationDesc)
{
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
    fc_info.activation_info = ConvertActivationDescriptorToAclActivationLayerInfo(activationDesc);
    return fc_info;
}

inline arm_compute::FullyConnectedLayerInfo
ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(const FullyConnectedDescriptor& fullyConnectedDesc,
                                                            arm_compute::ActivationLayerInfo activationLayerInfo)
{
    arm_compute::FullyConnectedLayerInfo fc_info;
    fc_info.transpose_weights = fullyConnectedDesc.m_TransposeWeightMatrix;
    fc_info.activation_info = activationLayerInfo;
    return fc_info;
}

inline arm_compute::InterpolationPolicy ConvertResizeMethodToAclInterpolationPolicy(ResizeMethod resizeMethod)
{
    switch (resizeMethod)
    {
        case ResizeMethod::Bilinear:
            return arm_compute::InterpolationPolicy::BILINEAR;
        case ResizeMethod::NearestNeighbor:
            return arm_compute::InterpolationPolicy::NEAREST_NEIGHBOR;
        default:
            throw InvalidArgumentException("Unsupported resize method");
    }
}

template<typename T>
inline T ComputeSoftmaxAclAxis(const SoftmaxDescriptor& softmaxDesc, const armnn::TensorInfo& tensor)
{
    // Detect the Android default value of -1 and return the ACL default value of 0.
    if (softmaxDesc.m_Axis == -1)
    {
        return 0;
    }

    unsigned int dim = tensor.GetNumDimensions();

    ARMNN_ASSERT(dim != 0);

    // Currently ArmNN only supports axis 1.
    auto aclAxis = (static_cast<T>(dim) - 1);
    aclAxis = aclAxis > 0 ? aclAxis - 1 : aclAxis;
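    // For example, a 4D tensor gives aclAxis = 2 and a 2D tensor gives aclAxis = 0, both of which
    // correspond to ArmNN axis 1 counted from the opposite end.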

    return aclAxis;
}

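/// Function to work out which dimensions differ between the splitter views and the input, i.e. the split axes.
/// For example, splitting a {2, 6, 4} input into three {2, 2, 4} views yields the axis set {1}.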
inline std::set<unsigned int> ComputeSplitAxis(const armnn::SplitterDescriptor& desc, const TensorShape& input)
{
    unsigned int numSplit = desc.GetNumViews();
    unsigned int numDimensions = desc.GetNumDimensions();
    std::set<unsigned int> splitAxis;

    for (unsigned int i = 0; i < numSplit; ++i)
    {
        for (unsigned int dimIdx = 0; dimIdx < numDimensions; ++dimIdx)
        {
            if (desc.GetViewSizes(i)[dimIdx] != input[dimIdx])
            {
                splitAxis.insert(dimIdx);
            }
        }
    }
    return splitAxis;
}

/// Function to convert ArmNN axis (left to right) to ACL axis (right to left) ranging from [-rank, rank).
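/// For example, on a rank-4 tensor, ArmNN axis 0 maps to ACL axis 3 and ArmNN axis 3 maps to ACL axis 0.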
inline int ComputeAclAxis(const int& armnnAxis, const armnn::TensorInfo& tensor)
{
    int rank = static_cast<int>(tensor.GetNumDimensions());

    ARMNN_ASSERT(rank != 0);
    ARMNN_ASSERT((-1 * rank) <= armnnAxis);
    ARMNN_ASSERT(armnnAxis < rank);

    int sign = (armnnAxis < 0) ? -1 : 1;
    int aclAxis = sign * rank - 1 - armnnAxis;

    return aclAxis;
}

/// Function to convert an axis to its positive equivalent value.
/// [-rank, rank) --> [0, rank)
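/// For example, axis -1 on a rank-4 tensor becomes 3.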
inline unsigned int ComputePositiveAxis(const int& axis, const armnn::TensorInfo& tensor)
{
    int rank = static_cast<int>(tensor.GetNumDimensions());

    ARMNN_ASSERT(rank != 0);
    ARMNN_ASSERT((-1 * rank) <= axis);
    ARMNN_ASSERT(axis < rank);

    int positiveAxis = (axis < 0) ? rank + axis : axis;
    return static_cast<unsigned int>(positiveAxis);
}

inline arm_compute::ReductionOperation ConvertReductionOperationToAcl(const ReduceDescriptor& descriptor)
{
    switch (descriptor.m_ReduceOperation)
    {
        case ReduceOperation::Sum: return arm_compute::ReductionOperation::SUM;
        case ReduceOperation::Mean: return arm_compute::ReductionOperation::MEAN_SUM;
        case ReduceOperation::Max: return arm_compute::ReductionOperation::MAX;
        case ReduceOperation::Min: return arm_compute::ReductionOperation::MIN;
        default: throw InvalidArgumentException("Unsupported Reduction operation");
    }
}

/// Function to compute the output tensor shape based on the axes and whether keepDims is set.
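/// For example, reducing a {2, 3, 4} input over axis {1} gives {2, 4}, or {2, 1, 4} when keepDims is true.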
inline const TensorInfo ComputeReductionTensorShape(const armnn::TensorInfo& input,
                                                    const std::vector<uint32_t>& vAxis,
                                                    const bool keepDims)
{
    auto reducedTensorInfo = input;
    unsigned int rank = reducedTensorInfo.GetNumDimensions();
    unsigned int outputRank = 0;
    // Calculate output dimension
    if (keepDims)
    {
        outputRank = rank;
    }
    else if (vAxis.empty())
    {
        outputRank = 1;
    }
    else if (vAxis.size() > reducedTensorInfo.GetNumDimensions())
    {
        throw LayerValidationException("ReduceLayer: Dimensions to reduce can not be bigger than input dimensions");
    }
    else
    {
        outputRank = reducedTensorInfo.GetNumDimensions() - armnn::numeric_cast<unsigned int>(vAxis.size());
        if (outputRank == 0)
        {
            outputRank = 1;
        }
    }
    std::vector<unsigned int> dimSizes(outputRank, 1);
    if (!vAxis.empty())
    {
        // Skip the dimension that has been reduced unless keepDims is true.
        unsigned int outputIndex = 0;
        for (unsigned int i = 0; i < reducedTensorInfo.GetNumDimensions(); ++i)
        {
            if (std::find(vAxis.begin(), vAxis.end(), i) == vAxis.end())
            {
                dimSizes[outputIndex] = armnn::numeric_cast<unsigned int>(reducedTensorInfo.GetShape()[i]);
                ++outputIndex;
            }
            else if (keepDims)
            {
                dimSizes[outputIndex] = 1;
                ++outputIndex;
            }
        }
    }
    const TensorShape inferredShape = TensorShape(outputRank, dimSizes.data());
    reducedTensorInfo.SetShape(inferredShape);
    return reducedTensorInfo;
}

/// Macro to check whether a reduce layer with multiple axes is supported on each backend, by validating it
/// as a chain of single-axis reductions.
#define IS_MULTI_AXES_REDUCE_SUPPORTED(func, input, desc, status)                   \
    armnn::TensorInfo inputTensorInfo = input;                                       \
    unsigned int recalulatedAxis = 0;                                                \
    std::vector<uint32_t> axes;                                                      \
                                                                                     \
    for (unsigned int i = 0; i != desc.m_vAxis.size(); ++i)                          \
    {                                                                                \
        axes.emplace_back(desc.m_vAxis[i]);                                          \
                                                                                     \
        const armnn::TensorInfo& reducedTensorInfo =                                 \
            ComputeReductionTensorShape(input, axes, desc.m_KeepDims);               \
                                                                                     \
        std::vector<uint32_t> singleAxis(1, desc.m_vAxis[i] - recalulatedAxis);      \
                                                                                     \
        armnn::ReduceDescriptor newReduceDescriptor = desc;                          \
        newReduceDescriptor.m_vAxis.assign(singleAxis.begin(), singleAxis.end());    \
                                                                                     \
        status = func(inputTensorInfo, reducedTensorInfo, newReduceDescriptor);      \
        if (!status)                                                                 \
        {                                                                            \
            break;                                                                   \
        }                                                                            \
                                                                                     \
        if (!desc.m_KeepDims)                                                        \
        {                                                                            \
            recalulatedAxis++;                                                       \
        }                                                                            \
                                                                                     \
        inputTensorInfo = reducedTensorInfo;                                         \
    }
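
// Illustrative usage sketch only: the helper named below is hypothetical and not part of this header. A backend's
// reduce-support check could expand the macro against a single-axis validation function with a signature such as
// bool ValidateSingleAxisReduce(const TensorInfo& input, const TensorInfo& output, const ReduceDescriptor& desc):
//
//     bool status = false;
//     IS_MULTI_AXES_REDUCE_SUPPORTED(ValidateSingleAxisReduce, inputInfo, reduceDescriptor, status);
//     return status;
//
// Each iteration validates one axis and feeds the reduced shape into the next, so a multi-axis reduce is reported
// as supported only if every intermediate single-axis reduce is.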

} // namespace armnn