//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefL2NormalizationWorkload.hpp"
#include "RefWorkloadUtils.hpp"
#include "Decoders.hpp"
#include "Encoders.hpp"

#include <Profiling.hpp>

#include <armnnUtils/DataLayoutIndexed.hpp>
#include <armnn/utility/NumericCast.hpp>

#include <cmath>

using namespace armnnUtils;

namespace armnn
{
RefL2NormalizationWorkload::RefL2NormalizationWorkload(
        const L2NormalizationQueueDescriptor& descriptor,
        const WorkloadInfo& info)
    : BaseWorkload<L2NormalizationQueueDescriptor>(descriptor, info) {}

void RefL2NormalizationWorkload::Execute() const
{
    Execute(m_Data.m_Inputs, m_Data.m_Outputs);
}

void RefL2NormalizationWorkload::ExecuteAsync(WorkingMemDescriptor &workingMemDescriptor)
{
    Execute(workingMemDescriptor.m_Inputs, workingMemDescriptor.m_Outputs);
}

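// Reference (CpuRef) implementation of L2 normalisation: each value is divided by the
// L2 norm of the values across the channel dimension at the same (batch, height, width)
// position.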
void RefL2NormalizationWorkload::Execute(std::vector<ITensorHandle*> inputs, std::vector<ITensorHandle*> outputs) const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefL2NormalizationWorkload_Execute");

    const TensorInfo& inputInfo  = GetTensorInfo(inputs[0]);
    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);

    auto inputDecoder  = MakeDecoder<float>(inputInfo, inputs[0]->Map());
    auto outputEncoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());

    DataLayoutIndexed dataLayout(m_Data.m_Parameters.m_DataLayout);

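    // Pad the input shape out to 4 dimensions so DataLayoutIndexed can index it uniformly;
    // any dimensions missing from the original shape are treated as having size 1.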
    const TensorShape& shape = inputInfo.GetShape();
    unsigned int paddedShapeArray[4];
    const int idxShift = 4 - armnn::numeric_cast<int>(shape.GetNumDimensions());

    const unsigned int batches = (idxShift == 0) ? shape[0] : 1;
    paddedShapeArray[0] = batches;

    const int channelsIdx = armnn::numeric_cast<int>(dataLayout.GetChannelsIndex());
    const unsigned int channels = (channelsIdx - idxShift >= 0)
                                  ? shape[armnn::numeric_cast<unsigned int>(channelsIdx - idxShift)]
                                  : 1;
    paddedShapeArray[channelsIdx] = channels;

    const int heightIdx = armnn::numeric_cast<int>(dataLayout.GetHeightIndex());
    const unsigned int height = (heightIdx - idxShift >= 0)
                                ? shape[armnn::numeric_cast<unsigned int>(heightIdx - idxShift)]
                                : 1;
    paddedShapeArray[heightIdx] = height;

    const int widthIdx = armnn::numeric_cast<int>(dataLayout.GetWidthIndex());
    const unsigned int width = (widthIdx - idxShift >= 0)
                               ? shape[armnn::numeric_cast<unsigned int>(widthIdx - idxShift)]
                               : 1;
    paddedShapeArray[widthIdx] = width;

    const TensorShape& paddedShape = TensorShape(4, paddedShapeArray);

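    // For every (batch, height, width) position: accumulate the sum of squares over the
    // channel dimension, then scale each channel value by 1 / sqrt(max(sum, Eps)).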
    for (unsigned int n = 0; n < batches; ++n)
    {
        for (unsigned int c = 0; c < channels; ++c)
        {
            for (unsigned int h = 0; h < height; ++h)
            {
                for (unsigned int w = 0; w < width; ++w)
                {
                    float reduction = 0.0;
                    for (unsigned int d = 0; d < channels; ++d)
                    {
                        unsigned int inputIndex = dataLayout.GetIndex(paddedShape, n, d, h, w);

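                        // operator[] repositions the decoder at the flattened index;
                        // Get() then reads the element at that position.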
                        (*inputDecoder)[inputIndex];
                        const float value = inputDecoder->Get();
                        reduction += value * value;
                    }

                    unsigned int index = dataLayout.GetIndex(paddedShape, n, c, h, w);

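                    // Clamp the sum of squares to at least Eps so the scale below never divides by zero.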
                    float maximum = reduction < m_Data.m_Parameters.m_Eps ? m_Data.m_Parameters.m_Eps : reduction;

                    const float scale = 1.0f / sqrtf(maximum);

                    (*inputDecoder)[index];
                    (*outputEncoder)[index];
                    outputEncoder->Set(inputDecoder->Get() * scale);
                }
            }
        }
    }
}

} //namespace armnn