//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "RefFakeQuantizationFloat32Workload.hpp"

#include "RefWorkloadUtils.hpp"

#include "Profiling.hpp"

#include <armnn/utility/NumericCast.hpp>

namespace armnn
{

// Quantizes each input value to the uint8 range defined by [min, max] and
// writes the resulting quantized level back to the output as a float.
void FakeQuantization(const float* inputData, float* outputData, uint32_t numElements, float min, float max)
{
    // Map the [min, max] range onto the 256 representable uint8 levels.
    float scale = (max - min) / 255.f;
    int32_t offset = armnn::numeric_cast<int32_t>((-min * 255.f) / (max - min));

    for (uint32_t i = 0; i < numElements; i++)
    {
        outputData[i] = static_cast<float>(armnn::Quantize<uint8_t>(inputData[i], scale, offset));
    }
}

void RefFakeQuantizationFloat32Workload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefFakeQuantizationFloat32Workload_Execute");

    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);

    const float* inputData = GetInputTensorDataFloat(0, m_Data);
    float* outputData = GetOutputTensorDataFloat(0, m_Data);
    FakeQuantization(inputData, outputData, inputInfo.GetNumElements(),
                     m_Data.m_Parameters.m_Min,
                     m_Data.m_Parameters.m_Max);
}

} // namespace armnn