//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonNormalizationFloatWorkload.hpp"

#include "NeonWorkloadUtils.hpp"
#include <aclCommon/ArmComputeUtils.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>

#include <arm_compute/runtime/NEON/functions/NENormalizationLayer.h>

using namespace armnn::armcomputetensorutils;

namespace armnn
{

namespace
{

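// Checks the ArmNN descriptor against the subset of normalization that the ACL
// NEON backend implements: NENormalizationLayer provides LRN-style (local
// brightness) normalization with a window centred on each element, which is
// why only LocalBrightness is accepted and the window size must be odd.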
bool IsNeonNormalizationDescriptorSupported(const NormalizationDescriptor& parameters,
                                            Optional<std::string&> reasonIfUnsupported)
{
    if (parameters.m_NormMethodType != NormalizationAlgorithmMethod::LocalBrightness)
    {
        if (reasonIfUnsupported)
        {
            reasonIfUnsupported.value() = "Unsupported normalization method type; only LocalBrightness is supported.";
        }
        return false;
    }
    if (parameters.m_NormSize % 2 == 0)
    {
        if (reasonIfUnsupported)
        {
            reasonIfUnsupported.value() = "Normalization size must be an odd number.";
        }
        return false;
    }

    return true;
}

} // anonymous namespace

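// Asks ACL to validate the configuration without constructing the layer. The
// backend's layer-support checks call this so unsupported shapes or layouts
// are rejected before a workload is ever created.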
arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo& input,
                                                      const TensorInfo& output,
                                                      const NormalizationDescriptor& descriptor)
{
    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);

    arm_compute::NormalizationLayerInfo normalizationInfo = BuildArmComputeNormalizationLayerInfo(descriptor);

    return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
}

NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor,
                                                               const WorkloadInfo& info,
                                                               std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : FloatWorkload<NormalizationQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("NeonNormalizationFloatWorkload", 1, 1);
    std::string reasonIfUnsupported;
    if (!IsNeonNormalizationDescriptorSupported(m_Data.m_Parameters, Optional<std::string&>(reasonIfUnsupported)))
    {
        throw UnimplementedException(reasonIfUnsupported);
    }

    // Input and output tensors must have the same shape; all four dimensions are compared explicitly.
    if (info.m_InputTensorInfos[0].GetShape()[1] != info.m_OutputTensorInfos[0].GetShape()[1]
        || info.m_InputTensorInfos[0].GetShape()[0] != info.m_OutputTensorInfos[0].GetShape()[0]
        || info.m_InputTensorInfos[0].GetShape()[3] != info.m_OutputTensorInfos[0].GetShape()[3]
        || info.m_InputTensorInfos[0].GetShape()[2] != info.m_OutputTensorInfos[0].GetShape()[2])
    {
        throw InvalidArgumentException("Normalization requires input and output tensors to have equal dimensionality.");
    }
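    // Retrieve the backing ACL tensors and propagate the data layout (NCHW or NHWC)
    // from the ArmNN descriptor so ACL indexes the tensors correctly.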
    arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
    input.info()->set_data_layout(aclDataLayout);
    output.info()->set_data_layout(aclDataLayout);

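    // Map the ArmNN parameters onto ACL's NormalizationLayerInfo:
    // (type, norm_size, alpha, beta, kappa, is_scaled). Passing is_scaled = false
    // tells ACL to use alpha as given rather than scaling it by the window size.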
    const arm_compute::NormType normType =
        ConvertNormalizationAlgorithmChannelToAclNormType(m_Data.m_Parameters.m_NormChannelType);
    arm_compute::NormalizationLayerInfo normalizationInfo(normType,
                                                          m_Data.m_Parameters.m_NormSize,
                                                          m_Data.m_Parameters.m_Alpha,
                                                          m_Data.m_Parameters.m_Beta,
                                                          m_Data.m_Parameters.m_K,
                                                          false);
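    // The shared memory manager lets ACL allocate the layer's intermediate
    // tensors from a pool that can be reused across workloads.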
    auto layer = std::make_unique<arm_compute::NENormalizationLayer>(memoryManager);
    layer->configure(&input, &output, normalizationInfo);
    m_NormalizationLayer.reset(layer.release());
}

void NeonNormalizationFloatWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonNormalizationFloatWorkload_Execute");
    m_NormalizationLayer->run();
}

} //namespace armnn