IVGCVSW-4018 Move QuantizeHelper.hpp to armnnUtils
* Moved QuantizeHelper.hpp to armnnUtils
* Reordered parameters for QuantizedVector and added default
values for qScale and qOffset to make life easier when
using the function for non-quantized types such as Float16
Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I28c263dfa425f1316feccb4116839a84f5d568e5
diff --git a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp
index c46376b..dd851e3 100644
--- a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp
@@ -7,6 +7,7 @@
#include "CommonTestUtils.hpp"
+#include <QuantizeHelper.hpp>
#include <ResolveType.hpp>
#include <armnn/ArmNN.hpp>
@@ -53,8 +54,8 @@
};
// quantize data
- std::vector<T> qInputData = QuantizedVector<T>(qScale, qOffset, inputData);
- std::vector<T> qExpectedOutputData = QuantizedVector<T>(qScale, qOffset, expectedOutputData);
+ std::vector<T> qInputData = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+ std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
INetworkPtr network = CreateAbsNetwork(tensorInfo);