blob: 472c75f22217f06260fca62f1439fd459d6016dd [file] [log] [blame]
telsoa014fcda012018-03-09 14:13:49 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa014fcda012018-03-09 14:13:49 +00004//
5
arovir019e53a352018-08-31 15:26:35 +01006#include "NeonNormalizationFloatWorkload.hpp"
David Beck0dbe0ee2018-09-24 15:59:27 +01007#include <backends/neon/NeonLayerSupport.hpp>
David Beck711fa312018-09-24 10:46:38 +01008#include <backends/aclCommon/ArmComputeUtils.hpp>
9#include <backends/aclCommon/ArmComputeTensorUtils.hpp>
telsoa014fcda012018-03-09 14:13:49 +000010
11namespace armnn
12{
13
telsoa01c577f2c2018-08-31 09:22:23 +010014arm_compute::Status NeonNormalizationWorkloadValidate(const TensorInfo& input,
15 const TensorInfo& output,
16 const NormalizationDescriptor& descriptor)
17{
18 const arm_compute::TensorInfo aclInput = armcomputetensorutils::BuildArmComputeTensorInfo(input);
19 const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
20
21 arm_compute::NormalizationLayerInfo normalizationInfo =
22 armcomputetensorutils::BuildArmComputeNormalizationLayerInfo(descriptor);
23
24 return arm_compute::NENormalizationLayer::validate(&aclInput, &aclOutput, normalizationInfo);
25}
26
arovir019e53a352018-08-31 15:26:35 +010027NeonNormalizationFloatWorkload::NeonNormalizationFloatWorkload(const NormalizationQueueDescriptor& descriptor,
telsoa01c577f2c2018-08-31 09:22:23 +010028 const WorkloadInfo& info,
29 std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
30 : FloatWorkload<NormalizationQueueDescriptor>(descriptor, info)
surmeh013537c2c2018-05-18 16:31:43 +010031 , m_NormalizationLayer(memoryManager)
telsoa014fcda012018-03-09 14:13:49 +000032{
arovir019e53a352018-08-31 15:26:35 +010033 m_Data.ValidateInputsOutputs("NeonNormalizationFloatWorkload", 1, 1);
telsoa014fcda012018-03-09 14:13:49 +000034 std::string reasonIfUnsupported;
35 if (!IsNeonNormalizationDescParamsSupported(&reasonIfUnsupported, m_Data.m_Parameters))
36 {
37 throw UnimplementedException(reasonIfUnsupported);
38 }
39
telsoa01c577f2c2018-08-31 09:22:23 +010040 // Input and output tensors have to have the same dimensionality.
telsoa014fcda012018-03-09 14:13:49 +000041 if (info.m_InputTensorInfos[0].GetShape()[1] != info.m_OutputTensorInfos[0].GetShape()[1]
42 || info.m_InputTensorInfos[0].GetShape()[0] != info.m_OutputTensorInfos[0].GetShape()[0]
43 || info.m_InputTensorInfos[0].GetShape()[3] != info.m_OutputTensorInfos[0].GetShape()[3]
44 || info.m_InputTensorInfos[0].GetShape()[2] != info.m_OutputTensorInfos[0].GetShape()[2])
45 {
46 throw InvalidArgumentException("Normalization requires input and output tensors to have equal dimensionality.");
47 }
48
49 arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
50 arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
51
52 const arm_compute::NormType normType =
53 ConvertNormalizationAlgorithmChannelToAclNormType(m_Data.m_Parameters.m_NormChannelType);
54 arm_compute::NormalizationLayerInfo normalizationInfo(normType,
55 m_Data.m_Parameters.m_NormSize,
56 m_Data.m_Parameters.m_Alpha,
57 m_Data.m_Parameters.m_Beta,
58 m_Data.m_Parameters.m_K,
59 false);
60
61 m_NormalizationLayer.configure(&input, &output, normalizationInfo);
62}
63
// Runs the pre-configured ACL normalization layer on the tensors bound in the
// constructor; the scoped macro records a profiling event for the duration.
void NeonNormalizationFloatWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonNormalizationFloatWorkload_Execute");
    m_NormalizationLayer.run();
}
69
70} //namespace armnn