IVGCVSW-4018 Move QuantizeHelper.hpp to armnnUtils
* Moved QuantizeHelper.hpp to armnnUtils
* Reordered the parameters of QuantizedVector and added default
  values for qScale and qOffset, so the function can be called without
  quantization arguments for non-quantized types such as Float16
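  For reference, a minimal sketch of the resulting call shapes. The
  exact default values (qScale = 1.0f, qOffset = 0) are an assumption
  based on the description above:

      #include <QuantizeHelper.hpp>
      #include <vector>

      std::vector<float> data = { 1.0f, 2.0f, 3.0f };

      // Quantized type: data comes first, scale and offset follow
      auto qVec = armnnUtils::QuantizedVector<uint8_t>(data, 0.5f, 10);

      // Non-quantized type (e.g. Float16/Float32): the assumed
      // defaults apply, so the quantization parameters can be omitted
      auto fVec = armnnUtils::QuantizedVector<float>(data);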
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I28c263dfa425f1316feccb4116839a84f5d568e5
diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
index 5c75b6f..569f5af 100644
--- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
@@ -6,6 +6,7 @@
#include "L2NormalizationTestImpl.hpp"
#include <Permute.hpp>
+#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
#include <TensorUtils.hpp>
@@ -44,10 +45,10 @@
inputData = tmp;
}
- auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset(),
- inputData));
+ auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
+ armnnUtils::QuantizedVector<T>(inputData,
+ inputTensorInfo.GetQuantizationScale(),
+ inputTensorInfo.GetQuantizationOffset()));
std::vector<float> expectedOutputData = expectedOutputValues;
if (layout == armnn::DataLayout::NHWC)
@@ -59,10 +60,11 @@
}
LayerTestResult<T, 4> result(outputTensorInfo);
- result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset(),
- expectedOutputData));
+ result.outputExpected =
+ MakeTensor<T, 4>(outputTensorInfo,
+ armnnUtils::QuantizedVector<T>(expectedOutputData,
+ outputTensorInfo.GetQuantizationScale(),
+ outputTensorInfo.GetQuantizationOffset()));
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -693,16 +695,10 @@
const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
- auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, QuantizedVector<float>(
- inputTensorInfo.GetQuantizationScale(),
- inputTensorInfo.GetQuantizationOffset(),
- inputData));
+ auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);
LayerTestResult<float, 2> result(outputTensorInfo);
- result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, QuantizedVector<float>(
- outputTensorInfo.GetQuantizationScale(),
- outputTensorInfo.GetQuantizationOffset(),
- expectedOutputData));
+ result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);
std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
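The Float32 hunk above drops the QuantizedVector round-trip altogether:
for non-quantized element types the helper is expected to leave the
values unchanged, so constructing the tensor directly from the float
data is equivalent. A sketch of the equivalence, assuming pass-through
behaviour for float:

    // QuantizedVector<float> is assumed to be a no-op for float data,
    // so both constructions yield the same tensor contents:
    auto viaHelper = MakeTensor<float, 2>(inputTensorInfo,
        armnnUtils::QuantizedVector<float>(inputData,
            inputTensorInfo.GetQuantizationScale(),
            inputTensorInfo.GetQuantizationOffset()));
    auto direct = MakeTensor<float, 2>(inputTensorInfo, inputData);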