blob: 2a355edaed44e4518a54b1ebbbfcaf30c8859ec7 [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
Mike Kelly7cbe7812023-07-25 17:37:33 +01002// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
arovir019e53a352018-08-31 15:26:35 +01006#include "NeonNormalizationFloatWorkload.hpp"
Matthew Benthamd80a7122019-01-08 17:52:37 +00007
8#include "NeonWorkloadUtils.hpp"
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00009#include <aclCommon/ArmComputeUtils.hpp>
10#include <aclCommon/ArmComputeTensorUtils.hpp>
Jan Eilersbb446e52020-04-02 13:56:54 +010011#include <armnn/utility/PolymorphicDowncast.hpp>
telsoa014fcda012018-03-09 14:13:49 +000012
Matthew Benthamd80a7122019-01-08 17:52:37 +000013#include <arm_compute/runtime/NEON/functions/NENormalizationLayer.h>
14
narpra0133cea4d2018-09-27 16:46:14 +010015using namespace armnn::armcomputetensorutils;
16
telsoa014fcda012018-03-09 14:13:49 +000017namespace armnn
18{
19
Aron Virginas-Tarfc824312018-10-15 15:00:13 +010020namespace
21{
Keith Davis2d0679f2021-08-05 11:35:00 +010022using ACLMemManagerOnDemand = std::shared_ptr<arm_compute::MemoryManagerOnDemand>;
Aron Virginas-Tarfc824312018-10-15 15:00:13 +010023
24bool IsNeonNormalizationDescriptorSupported(const NormalizationDescriptor& parameters,
25 Optional<std::string&> reasonIfUnsupported)
26{
27 if (parameters.m_NormMethodType != NormalizationAlgorithmMethod::LocalBrightness)
28 {
29 if (reasonIfUnsupported)
30 {
31 reasonIfUnsupported.value() = "Unsupported normalisation method type, only LocalBrightness is supported";
32 }
33 return false;
34 }
35 if (parameters.m_NormSize % 2 == 0)
36 {
37 if (reasonIfUnsupported)
38 {
39 reasonIfUnsupported.value() = "Normalization size must be an odd number.";
40 }
41 return false;
42 }
43
44 return true;
45}
46
47} // anonymous namespace
48
telsoa01c577f2c2018-08-31 09:22:23 +010049arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo& input,
50 const TensorInfo& output,
51 const NormalizationDescriptor& descriptor)
52{
narpra0133cea4d2018-09-27 16:46:14 +010053 const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input, descriptor.m_DataLayout);
54 const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output, descriptor.m_DataLayout);
telsoa01c577f2c2018-08-31 09:22:23 +010055
narpra0133cea4d2018-09-27 16:46:14 +010056 arm_compute::NormalizationLayerInfo normalizationInfo = BuildArmComputeNormalizationLayerInfo(descriptor);
telsoa01c577f2c2018-08-31 09:22:23 +010057
58 return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
59}
60
arovir019e53a352018-08-31 15:26:35 +010061NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor,
Keith Davis2d0679f2021-08-05 11:35:00 +010062 const WorkloadInfo& info,
63 ACLMemManagerOnDemand& memoryManager)
telsoa01c577f2c2018-08-31 09:22:23 +010064 : FloatWorkload<NormalizationQueueDescriptor>(descriptor, info)
telsoa014fcda012018-03-09 14:13:49 +000065{
Keith Davis2d0679f2021-08-05 11:35:00 +010066 // Report Profiling Details
67 ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonNormalizationWorkload_Construct",
68 descriptor.m_Parameters,
69 info,
70 this->GetGuid());
71
arovir019e53a352018-08-31 15:26:35 +010072 m_Data.ValidateInputsOutputs("NeonNormalizationFloatWorkload", 1, 1);
telsoa014fcda012018-03-09 14:13:49 +000073 std::string reasonIfUnsupported;
Aron Virginas-Tarfc824312018-10-15 15:00:13 +010074 if (!IsNeonNormalizationDescriptorSupported(m_Data.m_Parameters, Optional<std::string&>(reasonIfUnsupported)))
telsoa014fcda012018-03-09 14:13:49 +000075 {
76 throw UnimplementedException(reasonIfUnsupported);
77 }
78
telsoa01c577f2c2018-08-31 09:22:23 +010079 // Input and output tensors have to have the same dimensionality.
telsoa014fcda012018-03-09 14:13:49 +000080 if (info.m_InputTensorInfos[0].GetShape()[1] != info.m_OutputTensorInfos[0].GetShape()[1]
81 || info.m_InputTensorInfos[0].GetShape()[0] != info.m_OutputTensorInfos[0].GetShape()[0]
82 || info.m_InputTensorInfos[0].GetShape()[3] != info.m_OutputTensorInfos[0].GetShape()[3]
83 || info.m_InputTensorInfos[0].GetShape()[2] != info.m_OutputTensorInfos[0].GetShape()[2])
84 {
85 throw InvalidArgumentException("Normalization requires input and output tensors to have equal dimensionality.");
86 }
87
Jan Eilersbb446e52020-04-02 13:56:54 +010088 arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
89 arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
narpra0155a97bc2018-10-02 14:35:53 +010090 arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
91 input.info()->set_data_layout(aclDataLayout);
92 output.info()->set_data_layout(aclDataLayout);
telsoa014fcda012018-03-09 14:13:49 +000093
94 const arm_compute::NormType normType =
95 ConvertNormalizationAlgorithmChannelToAclNormType(m_Data.m_Parameters.m_NormChannelType);
96 arm_compute::NormalizationLayerInfo normalizationInfo(normType,
97 m_Data.m_Parameters.m_NormSize,
98 m_Data.m_Parameters.m_Alpha,
99 m_Data.m_Parameters.m_Beta,
100 m_Data.m_Parameters.m_K,
101 false);
Matthew Benthamd80a7122019-01-08 17:52:37 +0000102 auto layer = std::make_unique<arm_compute::NENormalizationLayer>(memoryManager);
103 layer->configure(&input, &output, normalizationInfo);
104 m_NormalizationLayer.reset(layer.release());
telsoa014fcda012018-03-09 14:13:49 +0000105}
106
/// Runs the pre-configured ACL normalization layer.
/// Tensors were bound and the layer configured in the constructor.
void NeonNormalizationFloatWorkload::Execute() const
{
    // Scoped profiling event covering the whole layer execution.
    ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonNormalizationFloatWorkload_Execute");
    m_NormalizationLayer->run();
}
112
David Monahanec819992022-02-10 14:47:13 +0000113void NeonNormalizationFloatWorkload::ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
114{
115 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
116 this->m_Data.m_Inputs[slot] = tensorHandle;
117 try
118 {
119 Reconfigure();
120 }
121 catch(armnn::UnimplementedException& e)
122 {
123 // Cannot reconfigure, revert the slot back and throw the exception.
124 this->m_Data.m_Inputs[slot] = backupHandle;
125 throw e;
126 }
127}
128
129// Replace output tensor handle with the given TensorHandle
130void NeonNormalizationFloatWorkload::ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot)
131{
132 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot];
133 this->m_Data.m_Inputs[slot] = tensorHandle;
134 try
135 {
136 Reconfigure();
137 }
138 catch(armnn::UnimplementedException& e)
139 {
140 // Cannot reconfigure, revert the slot back and throw the exception.
141 this->m_Data.m_Inputs[slot] = backupHandle;
142 throw e;
143 }
144}
145
/// Dynamic tensor-handle replacement is not supported by this workload.
/// ReplaceInputTensorHandle/ReplaceOutputTensorHandle catch this exception
/// and revert the handle swap.
void NeonNormalizationFloatWorkload::Reconfigure()
{
    throw armnn::UnimplementedException("Reconfigure not implemented for this workload");
}
150
telsoa014fcda012018-03-09 14:13:49 +0000151} //namespace armnn