IVGCVSW-4018 Move QuantizeHelper.hpp to armnnUtils

* Moved QuantizeHelper.hpp to armnnUtils
* Reordered the parameters of QuantizedVector and added default
  values for qScale and qOffset, so the function is easier to use
  with non-quantized types such as Float16
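  With the reordering, the values come first and the quantization
  parameters default to identity quantization. A sketch of the new
  declaration (the exact default values shown are assumed):

      template <typename T>
      std::vector<T> QuantizedVector(const std::vector<float>& values,
                                     float qScale = 1.0f,  // assumed default
                                     int32_t qOffset = 0); // assumed default

  so Float16 callers can simply write QuantizedVector<armnn::Half>(values)
  without passing dummy quantization parameters.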

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I28c263dfa425f1316feccb4116839a84f5d568e5
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 075c29d..a45c6d5 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -5,12 +5,12 @@
 
 #include "ActivationTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
 
 #include <backendsCommon/test/ActivationFixture.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -424,7 +424,7 @@
 
     LayerTestResult<T, 4> result(inputTensorInfo);
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(scale, offset, inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -451,8 +451,8 @@
     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
 
     // Calculated manually.
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(outScale, outOffset,
-                                                                                  outputExpectedData));
+    result.outputExpected =
+        MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));
 
     return result;
 }
@@ -812,7 +812,7 @@
 
     LayerTestResult<float, 5> result(inputTensorInfo);
 
-    auto input = MakeTensor<float, 5>(inputTensorInfo, QuantizedVector<float>(0.f, 0.f, inputData));
+    auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -836,8 +836,7 @@
     CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get());
 
     // Calculated manually.
-    result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, QuantizedVector<float>(0.f, 0.f,
-                                                                                  outputExpectedData));
+    result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);
 
     return result;
 };