IVGCVSW-4018 Move QuantizeHelper.hpp to armnnUtils

* Moved QuantizeHelper.hpp to armnnUtils
* Reordered the parameters of QuantizedVector and added default
  values for qScale and qOffset, so the function is easier to call
  for non-quantized types such as Float16 (see the sketch below)
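
  A sketch of the new call pattern; the default argument values shown
  here are assumed for illustration and are not quoted from this patch:

      #include <QuantizeHelper.hpp>
      #include <Half.hpp>

      #include <cstdint>
      #include <vector>

      void Example()
      {
          const std::vector<float> data = { 1.f, 2.f, 3.f };

          // Quantized types: data comes first, then scale and offset.
          std::vector<uint8_t> q =
              armnnUtils::QuantizedVector<uint8_t>(data, 0.5f, 128);

          // Non-quantized types such as Float16 can rely on the
          // defaulted qScale/qOffset and just pass the data.
          std::vector<armnn::Half> h =
              armnnUtils::QuantizedVector<armnn::Half>(data);
      }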

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I28c263dfa425f1316feccb4116839a84f5d568e5
diff --git a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp
index c46376b..dd851e3 100644
--- a/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/AbsEndToEndTestImpl.hpp
@@ -7,6 +7,7 @@
 
 #include "CommonTestUtils.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -53,8 +54,8 @@
     };
 
     // quantize data
-    std::vector<T> qInputData          = QuantizedVector<T>(qScale, qOffset, inputData);
-    std::vector<T> qExpectedOutputData = QuantizedVector<T>(qScale, qOffset, expectedOutputData);
+    std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
 
     INetworkPtr network = CreateAbsNetwork(tensorInfo);
 
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 8a96318..f310ef7 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -36,7 +36,6 @@
     OptimizeSubgraphViewTests.cpp
     OptimizationViewsTests.cpp
     PreluEndToEndTestImpl.hpp
-    QuantizeHelper.hpp
     QuantizedLstmEndToEndTestImpl.cpp
     QuantizedLstmEndToEndTestImpl.hpp
     ResizeEndToEndTestImpl.hpp
diff --git a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
index cf4db1d..fd0b12f 100644
--- a/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DepthToSpaceEndToEndTestImpl.hpp
@@ -9,8 +9,9 @@
 
 #include <armnn/ArmNN.hpp>
 
+#include <QuantizeHelper.hpp>
+
 #include <backendsCommon/test/DataLayoutUtils.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
 
 namespace
 {
@@ -58,8 +59,8 @@
         outputInfo.SetQuantizationOffset(qOffset);
     }
 
-    std::vector<T> inputData          = QuantizedVector<T>(qScale, qOffset, floatInputData);
-    std::vector<T> expectedOutputData = QuantizedVector<T>(qScale, qOffset, floatExpectedOutputData);
+    std::vector<T> inputData          = armnnUtils::QuantizedVector<T>(floatInputData, qScale, qOffset);
+    std::vector<T> expectedOutputData = armnnUtils::QuantizedVector<T>(floatExpectedOutputData, qScale, qOffset);
 
     // Permute tensors from NHWC to NCHW (if needed)
     if (descriptor.m_DataLayout == DataLayout::NCHW)
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index ee9d2bc..d6f589f 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -4,13 +4,12 @@
 //
 #pragma once
 
-#include <ResolveType.hpp>
-
 #include <armnn/ArmNN.hpp>
 #include <armnn/INetwork.hpp>
-#include <Profiling.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
+#include <Profiling.hpp>
+#include <QuantizeHelper.hpp>
+#include <ResolveType.hpp>
 
 #include <boost/test/unit_test.hpp>
 
@@ -99,9 +98,9 @@
 
     return ConstantUsageTest(backends,
         commonTensorInfo,
-        QuantizedVector<uint8_t>(scale, offset, { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }), // Input.
-        QuantizedVector<uint8_t>(scale, offset, { 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }), // Const input.
-        QuantizedVector<uint8_t>(scale, offset, { 7.f, 7.f, 7.f, 7.f, 7.f, 7.f })  // Expected output.
+        armnnUtils::QuantizedVector<uint8_t>({ 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }, scale, offset), // Input.
+        armnnUtils::QuantizedVector<uint8_t>({ 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }, scale, offset), // Const input.
+        armnnUtils::QuantizedVector<uint8_t>({ 7.f, 7.f, 7.f, 7.f, 7.f, 7.f }, scale, offset)  // Expected output.
     );
 }
 
diff --git a/src/backends/backendsCommon/test/QuantizeHelper.hpp b/src/backends/backendsCommon/test/QuantizeHelper.hpp
deleted file mode 100644
index b7ca3b3..0000000
--- a/src/backends/backendsCommon/test/QuantizeHelper.hpp
+++ /dev/null
@@ -1,112 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include <armnn/ArmNN.hpp>
-#include <armnn/TypesUtils.hpp>
-
-#include <Half.hpp>
-
-#include <initializer_list>
-#include <iterator>
-#include <vector>
-
-#include <boost/core/ignore_unused.hpp>
-#include <boost/numeric/conversion/cast.hpp>
-
-template<typename T, bool DoQuantize=true>
-struct SelectiveQuantizer
-{
-    static T Quantize(float value, float scale, int32_t offset)
-    {
-        return armnn::Quantize<T>(value, scale, offset);
-    }
-
-    static float Dequantize(T value, float scale, int32_t offset)
-    {
-        return armnn::Dequantize(value, scale, offset);
-    }
-};
-
-template<typename T>
-struct SelectiveQuantizer<T, false>
-{
-    static T Quantize(float value, float scale, int32_t offset)
-    {
-        boost::ignore_unused(scale, offset);
-        return value;
-    }
-
-    static float Dequantize(T value, float scale, int32_t offset)
-    {
-        boost::ignore_unused(scale, offset);
-        return value;
-    }
-};
-
-template<>
-struct SelectiveQuantizer<armnn::Half, false>
-{
-    static armnn::Half Quantize(float value, float scale, int32_t offset)
-    {
-        boost::ignore_unused(scale, offset);
-        return armnn::Half(value);
-    }
-
-    static float Dequantize(armnn::Half value, float scale, int32_t offset)
-    {
-        boost::ignore_unused(scale, offset);
-        return value;
-    }
-};
-
-template<typename T>
-T SelectiveQuantize(float value, float scale, int32_t offset)
-{
-    return SelectiveQuantizer<T, armnn::IsQuantizedType<T>()>::Quantize(value, scale, offset);
-};
-
-template<typename T>
-float SelectiveDequantize(T value, float scale, int32_t offset)
-{
-    return SelectiveQuantizer<T, armnn::IsQuantizedType<T>()>::Dequantize(value, scale, offset);
-};
-
-template<typename ItType>
-struct IsFloatingPointIterator
-{
-    static constexpr bool value=std::is_floating_point<typename std::iterator_traits<ItType>::value_type>::value;
-};
-
-template <typename T, typename FloatIt,
-typename std::enable_if<IsFloatingPointIterator<FloatIt>::value, int>::type=0 // Makes sure fp iterator is valid.
->
-std::vector<T> QuantizedVector(float qScale, int32_t qOffset, FloatIt first, FloatIt last)
-{
-    std::vector<T> quantized;
-    quantized.reserve(boost::numeric_cast<size_t>(std::distance(first, last)));
-
-    for (auto it = first; it != last; ++it)
-    {
-        auto f = *it;
-        T q =SelectiveQuantize<T>(f, qScale, qOffset);
-        quantized.push_back(q);
-    }
-
-    return quantized;
-}
-
-template<typename T>
-std::vector<T> QuantizedVector(float qScale, int32_t qOffset, const std::vector<float>& array)
-{
-    return QuantizedVector<T>(qScale, qOffset, array.begin(), array.end());
-}
-
-template<typename T>
-std::vector<T> QuantizedVector(float qScale, int32_t qOffset, std::initializer_list<float> array)
-{
-    return QuantizedVector<T>(qScale, qOffset, array.begin(), array.end());
-}
diff --git a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
index 4bf9d51..1eeb944 100644
--- a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
@@ -9,6 +9,7 @@
 #include <armnn/ArmNN.hpp>
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
@@ -119,8 +120,8 @@
     }
 
     // quantize data
-    std::vector<T> qInputData          = QuantizedVector<T>(qScale, qOffset, inputData);
-    std::vector<T> qExpectedOutputData = QuantizedVector<T>(qScale, qOffset, expectedOutputData);
+    std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
 
     INetworkPtr network = CreateResizeNetwork(descriptor, inputInfo, outputInfo);
 
@@ -144,4 +145,4 @@
                                    armnn::DataLayout dataLayout)
 {
     ResizeEndToEnd<ArmnnType>(backends, dataLayout, armnn::ResizeMethod::NearestNeighbor);
-}
\ No newline at end of file
+}
diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
index 9d6312e..4935a18 100644
--- a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
@@ -9,6 +9,7 @@
 #include <armnn/ArmNN.hpp>
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
@@ -129,12 +130,12 @@
     }
 
     // quantize data
-    std::vector<T> qInputData          = QuantizedVector<T>(qScale, qOffset, inputData);
-    std::vector<T> qWeightsData        = QuantizedVector<T>(qScale, qOffset, weightsData);
-    std::vector<T> qExpectedOutputData = QuantizedVector<T>(qScale, qOffset, expectedOutputData);
+    std::vector<T> qInputData          = armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset);
+    std::vector<T> qWeightsData        = armnnUtils::QuantizedVector<T>(weightsData, qScale, qOffset);
+    std::vector<T> qExpectedOutputData = armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset);
 
     using BT = ResolveType<ArmnnBType>;
-    std::vector<BT> qBiasesData  = QuantizedVector<BT>(qScale * qScale, 0, biasesData);
+    std::vector<BT> qBiasesData = armnnUtils::QuantizedVector<BT>(biasesData, qScale * qScale, 0);
 
     ConstTensor weights(weightsInfo, qWeightsData);
     ConstTensor biases(biasesInfo, qBiasesData);
@@ -150,4 +151,4 @@
                                                 { { 0, qInputData } },
                                                 { { 0, qExpectedOutputData } },
                                                 backends);
-}
\ No newline at end of file
+}
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 075c29d..a45c6d5 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -5,12 +5,12 @@
 
 #include "ActivationTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
 
 #include <backendsCommon/test/ActivationFixture.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -424,7 +424,7 @@
 
     LayerTestResult<T, 4> result(inputTensorInfo);
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(scale, offset, inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, scale, offset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -451,8 +451,8 @@
     CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
 
     // Calculated manually.
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(outScale, outOffset,
-                                                                                  outputExpectedData));
+    result.outputExpected =
+        MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, outScale, outOffset));
 
     return result;
 }
@@ -812,7 +812,7 @@
 
     LayerTestResult<float, 5> result(inputTensorInfo);
 
-    auto input = MakeTensor<float, 5>(inputTensorInfo, QuantizedVector<float>(0.f, 0.f, inputData));
+    auto input = MakeTensor<float, 5>(inputTensorInfo, inputData);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -836,8 +836,7 @@
     CopyDataFromITensorHandle(&result.output[0][0][0][0][0], outputHandle.get());
 
     // Calculated manually.
-    result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, QuantizedVector<float>(0.f, 0.f,
-                                                                                  outputExpectedData));
+    result.outputExpected = MakeTensor<float, 5>(outputTensorInfo, outputExpectedData);
 
     return result;
 };
diff --git a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
index c6d3982..247821b 100644
--- a/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/AdditionTestImpl.cpp
@@ -7,6 +7,8 @@
 
 #include "ElementwiseTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
+
 template<>
 std::unique_ptr<armnn::IWorkload> CreateWorkload<armnn::AdditionQueueDescriptor>(
     const armnn::IWorkloadFactory& workloadFactory,
@@ -177,7 +179,7 @@
         outputTensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
     {
         0.0f,
         1.0f,
@@ -187,16 +189,18 @@
 
         4.0f,
         5.0f,
-    }));
+    },
+    qScale, qOffset));
 
-    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
+    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
     {
         0.5f, 1.5f, 2.5f,
         3.5f, 4.5f, 5.5f,
-    }));
+    },
+    qScale, qOffset));
 
     LayerTestResult<T,4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
     {
         0.5f, 1.5f, 2.5f,
         4.5f, 5.5f, 6.5f,
@@ -206,7 +210,8 @@
 
         4.5f, 5.5f, 6.5f,
         8.5f, 9.5f, 10.5f,
-    }));
+    },
+    qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
@@ -256,31 +261,34 @@
         outputTensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, armnnUtils::QuantizedVector<T>(
     {
-            0.0f,  1.0f,  2.0f,
-            3.0f,  4.0f,  5.0f,
-            6.0f,  7.0f,  8.0f,
-            9.0f, 10.0f, 11.0f,
+         0.0f,  1.0f,  2.0f,
+         3.0f,  4.0f,  5.0f,
+         6.0f,  7.0f,  8.0f,
+         9.0f, 10.0f, 11.0f,
         12.0f, 13.0f, 14.0f,
         15.0f, 16.0f, 17.0f,
-    }));
+    },
+    qScale, qOffset));
 
-    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
+    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, armnnUtils::QuantizedVector<T>(
     {
         0.5f,
-    }));
+    },
+    qScale, qOffset));
 
     LayerTestResult<T,4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, armnnUtils::QuantizedVector<T>(
     {
-            0.5f,  1.5f,  2.5f,
-            3.5f,  4.5f,  5.5f,
-            6.5f,  7.5f,  8.5f,
-            9.5f, 10.5f, 11.5f,
+         0.5f,  1.5f,  2.5f,
+         3.5f,  4.5f,  5.5f,
+         6.5f,  7.5f,  8.5f,
+         9.5f, 10.5f, 11.5f,
         12.5f, 13.5f, 14.5f,
         15.5f, 16.5f, 17.5f,
-    }));
+    },
+    qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
     std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
diff --git a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
index ef43088..68cda7c 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchNormalizationTestImpl.cpp
@@ -6,6 +6,7 @@
 #include "BatchNormalizationTestImpl.hpp"
 
 #include <DataLayoutIndexed.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -14,7 +15,6 @@
 #include <backendsCommon/IBackendInternal.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -23,6 +23,8 @@
 namespace
 {
 
+using namespace armnnUtils;
+
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> BatchNormTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
@@ -53,19 +55,18 @@
         tensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
-                                        QuantizedVector<T>(qScale, qOffset, inputValues));
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputValues, qScale, qOffset));
 
     // These values are per-channel of the input.
-    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
-    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4,  9}));
-    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3,  2}));
-    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2,  1}));
+    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
+    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4,  9 }, qScale, qOffset));
+    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3,  2 }, qScale, qOffset));
+    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2,  1 }, qScale, qOffset));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
 
     result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
-                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
+                                             QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -134,17 +135,18 @@
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset,
+        QuantizedVector<T>(
         {
             1.f, 1.f, 4.f, 1.f,
             4.f, 4.f, 2.f, 1.f,
             1.f, -2.f, 6.f, 4.f
-        }));
+        },
+        qScale, qOffset));
     // These values are per-channel of the input.
-    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
-    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
-    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
-    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
+    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3, -2 }, qScale, qOffset));
+    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 4,  9 }, qScale, qOffset));
+    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 3,  2 }, qScale, qOffset));
+    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>({ 2,  1 }, qScale, qOffset));
     LayerTestResult<T,4> ret(outputTensorInfo);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
@@ -175,12 +177,13 @@
     // substract mean, divide by standard deviation (with an epsilon to avoid div by 0),
     // multiply by gamma and add beta
     ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset,
+        QuantizedVector<T>(
         {
             1.f, 3.f, 4.f, 3.f,
             4.f, 4.f, 2.f, 3.f,
             1.f, 2.f, 6.f, 4.f
-        }));
+        },
+        qScale, qOffset));
 
     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
 
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index 9da1d42..1c54b85 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -8,12 +8,12 @@
 #include <armnn/ArmNN.hpp>
 
 #include <Half.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <backendsCommon/Workload.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -155,8 +155,8 @@
 {
     using T = armnn::ResolveType<ArmnnInType>;
 
-    std::vector<T> inputData0 = QuantizedVector<T>(quantScale, quantOffset, testData.m_InputData0);
-    std::vector<T> inputData1 = QuantizedVector<T>(quantScale, quantOffset, testData.m_InputData1);
+    std::vector<T> inputData0 = armnnUtils::QuantizedVector<T>(testData.m_InputData0, quantScale, quantOffset);
+    std::vector<T> inputData1 = armnnUtils::QuantizedVector<T>(testData.m_InputData1, quantScale, quantOffset);
 
     return ComparisonTestImpl<4, ArmnnInType>(
         workloadFactory,
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 29476e5..e9932c8 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -6,6 +6,7 @@
 #include "ConcatTestImpl.hpp"
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -15,22 +16,25 @@
 
 #include <test/TensorHelpers.hpp>
 
+using namespace armnn;
+using namespace armnnUtils;
+
 //
 // Helper functions and templates
 //
 
-armnn::OriginsDescriptor CreateDescriptorForConcat(
-    const std::vector<armnn::TensorInfo> & inputTensorInfos,
+OriginsDescriptor CreateDescriptorForConcat(
+    const std::vector<TensorInfo> & inputTensorInfos,
     unsigned int concatDim)
 {
-    std::vector<armnn::TensorShape> shapes;
+    std::vector<TensorShape> shapes;
     shapes.reserve(inputTensorInfos.size());
-    for (const armnn::TensorInfo& it: inputTensorInfos)
+    for (const TensorInfo& it: inputTensorInfos)
     {
         shapes.push_back(it.GetShape());
     }
 
-    return armnn::CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatDim);
+    return CreateDescriptorForConcatenation(shapes.begin(), shapes.end(), concatDim);
 }
 
 //
@@ -40,7 +44,7 @@
 //
 
 bool NeedPermuteForConcat(
-    const std::vector<armnn::TensorInfo> & inputTensorInfos,
+    const std::vector<TensorInfo> & inputTensorInfos,
     unsigned int concatDim)
 {
     // See note above. Additionally we expect the input shapes to have the
@@ -65,7 +69,7 @@
     return (nDimensions < 3 || (nDimensions == 3 && (nDimensions-concatDim) < 3 && (nDimensions-concatDim) != 1));
 }
 
-armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
+TensorShape ExpandTensorShapeTo3dForPermute(const TensorShape & inputShape)
 {
     unsigned int numDims = inputShape.GetNumDimensions();
     if (numDims >= 3)
@@ -80,13 +84,13 @@
     {
         newDims[expandedBy+i] = inputShape[i];
     }
-    return armnn::TensorShape(3u, &newDims[0]);
+    return TensorShape(3u, &newDims[0]);
 }
 
 void Generate3dPermuteVectorForConcat(
     unsigned int numDimensions,
     unsigned int & concatDim,
-    std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
+    std::pair<PermutationVector, PermutationVector> & permutations)
 {
     BOOST_ASSERT_MSG(numDimensions <= 3,
        "Only dimensions 1,2 and 3 are supported by this helper");
@@ -96,15 +100,15 @@
     if (expandedConcatAxis == 2)
     {
         concatDim = 0;
-        armnn::PermutationVector forwardPermutation({1, 2, 0});
-        armnn::PermutationVector reversePermutation({2, 0, 1});
+        PermutationVector forwardPermutation({1, 2, 0});
+        PermutationVector reversePermutation({2, 0, 1});
         permutations = std::make_pair(forwardPermutation, reversePermutation);
     }
     else if (expandedConcatAxis == 1)
     {
         concatDim = 0;
-        armnn::PermutationVector forwardPermutation({2, 0, 1});
-        armnn::PermutationVector reversePermutation({1, 2, 0});
+        PermutationVector forwardPermutation({2, 0, 1});
+        PermutationVector reversePermutation({1, 2, 0});
         permutations = std::make_pair(forwardPermutation, reversePermutation);
     }
     else
@@ -115,10 +119,10 @@
 }
 
 template<typename T> void PermuteTensorData(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::PermutationVector& mappings,
-    armnn::TensorInfo & inputTensorInfo,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const PermutationVector& mappings,
+    TensorInfo & inputTensorInfo,
     const T * inputData,
     std::vector<T>& outputData)
 {
@@ -131,18 +135,18 @@
         return;
     }
 
-    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
+    TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    armnn::PermuteQueueDescriptor queueDescriptor;
-    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
-    armnn::WorkloadInfo workloadInfo;
+    PermuteQueueDescriptor queueDescriptor;
+    queueDescriptor.m_Parameters = PermuteDescriptor{mappings};
+    WorkloadInfo workloadInfo;
     AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
@@ -164,23 +168,23 @@
 // of the permuted concatenated tensor is going to be.
 //
 template<typename T> void PermuteInputsForConcat(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    std::vector<armnn::TensorInfo> & inputTensorInfos,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    std::vector<TensorInfo> & inputTensorInfos,
     std::vector<T *> & inputData,
     std::vector<std::vector<T>> & inputDataStorage,
-    armnn::PermutationVector & permuteVector,
+    PermutationVector & permuteVector,
     unsigned int & concatDim,
-    armnn::TensorInfo & outputTensorInfo)
+    TensorInfo & outputTensorInfo)
 {
     BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
         "Expecting more than one tensor to be concatenated here");
 
     unsigned int numDims = 0;
     unsigned int nthInput = 0;
-    const armnn::PermutationVector identity({0, 1, 2});
+    const PermutationVector identity({0, 1, 2});
 
-    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
+    std::pair<PermutationVector, PermutationVector> permutations =
         std::make_pair(identity, identity);
 
     inputDataStorage.resize(inputData.size());
@@ -203,7 +207,7 @@
                 "All inputs must have the same number of dimensions");
         }
 
-        armnn::TensorInfo newTensorInfo = tensorInfo;
+        TensorInfo newTensorInfo = tensorInfo;
         newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
 
         PermuteTensorData<T>(workloadFactory,
@@ -231,11 +235,11 @@
 // output.
 //
 template <typename T> void PermuteOutputForConcat(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo & tensorInfo,
-    const armnn::PermutationVector & permuteVector,
-    std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const TensorInfo & tensorInfo,
+    const PermutationVector & permuteVector,
+    std::unique_ptr<ITensorHandle> && inputDataHandle,
     T * data)
 {
     BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
@@ -247,7 +251,7 @@
         return;
     }
 
-    armnn::TensorInfo resultTensorInfo = tensorInfo;
+    TensorInfo resultTensorInfo = tensorInfo;
     std::vector<T> inputData(tensorInfo.GetNumElements());
     std::vector<T> outputData;
 
@@ -264,11 +268,11 @@
 }
 
 template<typename T> void Concatenate(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    std::initializer_list<const TensorInfo> inputTensorInfosOrig,
     std::initializer_list<T *> inputsOrig,
-    const armnn::TensorInfo& outputTensorInfoOrig,
+    const TensorInfo& outputTensorInfoOrig,
     T * output,
     unsigned int concatDim,
     bool useSubtensor)
@@ -283,11 +287,11 @@
     }
 
     // Saves a copy of the parameters which we might need to change.
-    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
+    std::vector<TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
     std::vector<T *> inputs            = inputsOrig;
-    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
+    TensorInfo outputTensorInfo = outputTensorInfoOrig;
 
-    armnn::PermutationVector permuteVector{0, 1, 2};
+    PermutationVector permuteVector{0, 1, 2};
 
     // Holds and automatically releases memory for the reshaped input data.
     std::vector<std::vector<T>> tmpInputDataStorage;
@@ -312,15 +316,15 @@
                                   outputTensorInfo);
     }
 
-    armnn::WorkloadInfo workloadInfo;
+    WorkloadInfo workloadInfo;
 
-    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
+    std::vector<std::unique_ptr<ITensorHandle>> inputHandles;
     inputHandles.reserve(inputCount);
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
-    armnn::ConcatQueueDescriptor queueDescriptor;
-    armnn::OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim);
+    ConcatQueueDescriptor queueDescriptor;
+    OriginsDescriptor viewsDescriptor = CreateDescriptorForConcat(inputTensorInfos, concatDim);
     queueDescriptor.m_Parameters = viewsDescriptor;
 
     if (useSubtensor)
@@ -337,8 +341,8 @@
         const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
         for (unsigned int i = 0; i < inputCount; ++i)
         {
-            const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
-            std::unique_ptr<armnn::ITensorHandle> inputHandle =
+            const TensorInfo& inputTensorInfo = inputTensorInfos[i];
+            std::unique_ptr<ITensorHandle> inputHandle =
                 subTensorsSupported ?
                     workloadFactory.CreateSubTensorHandle(*outputHandle,
                                                           inputTensorInfo.GetShape(),
@@ -353,7 +357,7 @@
     {
         for (unsigned int i = 0; i < inputCount; ++i)
         {
-            std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
+            std::unique_ptr<ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfos[i]);
             inputHandles.emplace_back(std::move(inputHandle));
         }
     }
@@ -365,7 +369,7 @@
 
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(queueDescriptor, workloadInfo);
 
     for (auto& inputHandle : inputHandles)
     {
@@ -403,20 +407,20 @@
 // Implementation templates
 //
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 1> Concat1dTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo({ 3 }, ArmnnType, qScale, qOffset);
 
-    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
-    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
-    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
+    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 1.0f, 2.0f, 3.0f }, qScale, qOffset));
+    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 4.0f, 5.0f, 6.0f }, qScale, qOffset));
+    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>({ 7.0f, 8.0f, 9.0f }, qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 9 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 1> result(outputTensorInfo);
 
@@ -430,48 +434,56 @@
                    0,
                    true);
 
-    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
-    }));
+    result.output         = MakeTensor<T, 1>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(
+        {
+            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Concat2dTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo& outputTensorInfo,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const TensorInfo& outputTensorInfo,
     unsigned int dimension,
     const float qScale,
     const int32_t qOffset)
 {
-    armnn::TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
 
-    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
+    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
-    }));
+            // Batch 1
+            10.0f, 11.0f, 12.0f,
+        },
+        qScale, qOffset));
 
-    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        4.0f, 5.0f, 6.0f,
+    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
+         {
+            // Batch 0
+            4.0f, 5.0f, 6.0f,
 
-        // Batch 1
-        13.0f, 14.0f, 15.0f,
-    }));
+            // Batch 1
+            13.0f, 14.0f, 15.0f,
+        },
+        qScale, qOffset));
 
-    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        7.0f, 8.0f, 9.0f,
+    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            7.0f, 8.0f, 9.0f,
 
-        // Batch 1
-        16.0f, 17.0f, 18.0f,
-    }));
+            // Batch 1
+            16.0f, 17.0f, 18.0f,
+        },
+        qScale, qOffset));
 
     LayerTestResult<T, 2> result(outputTensorInfo);
 
@@ -489,99 +501,109 @@
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Concat2dDim0TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 0, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
+            // Batch 1
+            10.0f, 11.0f, 12.0f,
 
-        // Batch 2
-        4.0f, 5.0f, 6.0f,
+            // Batch 2
+            4.0f, 5.0f, 6.0f,
 
-        // Batch 3
-        13.0f, 14.0f, 15.0f,
+            // Batch 3
+            13.0f, 14.0f, 15.0f,
 
-        // Batch 4
-        7.0f, 8.0f, 9.0f,
+            // Batch 4
+            7.0f, 8.0f, 9.0f,
 
-        // Batch 5
-        16.0f, 17.0f, 18.0f,
-    }));
+            // Batch 5
+            16.0f, 17.0f, 18.0f,
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Concat2dDim1TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 2> result = Concat2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 1, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
-    }));
+            // Batch 1
+            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Concat2dDim0DiffInputDimsTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
-    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
+    TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
-    }));
+            // Batch 1
+            10.0f, 11.0f, 12.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
-    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        4.0f, 5.0f, 6.0f,
+    TensorInfo input1TensorInfo({ 3, 3 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            4.0f, 5.0f, 6.0f,
 
-        // Batch 1
-        13.0f, 14.0f, 15.0f,
+            // Batch 1
+            13.0f, 14.0f, 15.0f,
 
-        // Batch 0
-        7.0f, 8.0f, 9.0f,
-    }));
+            // Batch 0
+            7.0f, 8.0f, 9.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
-    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 1
-        16.0f, 17.0f, 18.0f,
-    }));
+    TensorInfo input2TensorInfo({ 1, 3 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 1
+            16.0f, 17.0f, 18.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 6, 3 }, ArmnnType, qScale, qOffset);
     LayerTestResult<T, 2> result(outputTensorInfo);
 
     std::vector<T> output;
@@ -595,64 +617,72 @@
                    true);
 
     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
+            // Batch 1
+            10.0f, 11.0f, 12.0f,
 
-        // Batch 2
-        4.0f, 5.0f, 6.0f,
+            // Batch 2
+            4.0f, 5.0f, 6.0f,
 
-        // Batch 3
-        13.0f, 14.0f, 15.0f,
+            // Batch 3
+            13.0f, 14.0f, 15.0f,
 
-        // Batch 4
-        7.0f, 8.0f, 9.0f,
+            // Batch 4
+            7.0f, 8.0f, 9.0f,
 
-        // Batch 5
-        16.0f, 17.0f, 18.0f,
-    }));
+            // Batch 5
+            16.0f, 17.0f, 18.0f,
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Concat2dDim1DiffInputDimsTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
-    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f,
+    TensorInfo input0TensorInfo({ 2, 3 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f,
-    }));
+            // Batch 1
+            10.0f, 11.0f, 12.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
-    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
+    TensorInfo input1TensorInfo({ 2, 5 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
 
-        // Batch 1
-        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
-    }));
+            // Batch 1
+            13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
-    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        9.0f,
+    TensorInfo input2TensorInfo({ 2, 1 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            9.0f,
 
-        // Batch 1
-        18.0f
-    }));
+            // Batch 1
+            18.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 2, 9 }, ArmnnType, qScale, qOffset);
     LayerTestResult<T, 2> result(outputTensorInfo);
 
     std::vector<T> output;
@@ -666,292 +696,33 @@
                    true);
 
     result.output = MakeTensor<T, 2>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0
-        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0
+            1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
 
-        // Batch 1
-        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
-    }));
+            // Batch 1
+            10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 3> Concat3dTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo& outputTensorInfo,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const TensorInfo& outputTensorInfo,
     unsigned int dimension,
     bool useSubtensor,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
 
-    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
-
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
-
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
-
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
-
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
-
-        // Batch 1, Channel 2
-        23.0f, 24.0f
-    }));
-
-    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        7.0f, 8.0f,
-
-        // Batch 0, Channel 1
-        9.0f, 10.0f,
-
-        // Batch 0, Channel 2
-        11.0f, 12.0f,
-
-        // Batch 1, Channel 0
-        25.0f, 26.0f,
-
-        // Batch 1, Channel 1
-        27.0f, 28.0f,
-
-        // Batch 1, Channel 2
-        29.0f, 30.0f
-    }));
-
-    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        13.0f, 14.0f,
-
-        // Batch 0, Channel 1
-        15.0f, 16.0f,
-
-        // Batch 0, Channel 2
-        17.0f, 18.0f,
-
-        // Batch 1, Channel 0
-        31.0f, 32.0f,
-
-        // Batch 1, Channel 1
-        33.0f, 34.0f,
-
-        // Batch 1, Channel 2
-        35.0f, 36.0f
-    }));
-
-    LayerTestResult<T, 3> result(outputTensorInfo);
-
-    std::vector<T> output;
-    output.resize(outputTensorInfo.GetNumElements());
-    Concatenate<T>(workloadFactory, memoryManager,
-                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
-                   { input0.data(), input1.data(), input2.data() },
-                   outputTensorInfo,
-                   output.data(),
-                   dimension,
-                   useSubtensor);
-
-    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
-    return result;
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 3> Concat3dDim0TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
-        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
-
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
-
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
-
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
-
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
-
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
-
-        // Batch 1, Channel 2
-        23.0f, 24.0f,
-
-        // Batch 2, Channel 0
-        7.0f, 8.0f,
-
-        // Batch 2, Channel 1
-        9.0f, 10.0f,
-
-        // Batch 2, Channel 2
-        11.0f, 12.0f,
-
-        // Batch 3, Channel 0
-        25.0f, 26.0f,
-
-        // Batch 3, Channel 1
-        27.0f, 28.0f,
-
-        // Batch 3, Channel 2
-        29.0f, 30.0f,
-
-        // Batch 4, Channel 0
-        13.0f, 14.0f,
-
-        // Batch 4, Channel 1
-        15.0f, 16.0f,
-
-        // Batch 4, Channel 2
-        17.0f, 18.0f,
-
-        // Batch 5, Channel 0
-        31.0f, 32.0f,
-
-        // Batch 5, Channel 1
-        33.0f, 34.0f,
-
-        // Batch 5, Channel 2
-        35.0f, 36.0f
-    }));
-
-    return result;
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 3> Concat3dDim1TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
-        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
-
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
-
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
-
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
-
-        // Batch 0, Channel 3
-        7.0f, 8.0f,
-
-        // Batch 0, Channel 4
-        9.0f, 10.0f,
-
-        // Batch 0, Channel 5
-        11.0f, 12.0f,
-
-        // Batch 0, Channel 6
-        13.0f, 14.0f,
-
-        // Batch 0, Channel 7
-        15.0f, 16.0f,
-
-        // Batch 0, Channel 8
-        17.0f, 18.0f,
-
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
-
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
-
-        // Batch 1, Channel 2
-        23.0f, 24.0f,
-
-        // Batch 1, Channel 3
-        25.0f, 26.0f,
-
-        // Batch 1, Channel 4
-        27.0f, 28.0f,
-
-        // Batch 1, Channel 5
-        29.0f, 30.0f,
-
-        // Batch 1, Channel 6
-        31.0f, 32.0f,
-
-        // Batch 1, Channel 7
-        33.0f, 34.0f,
-
-        // Batch 1, Channel 8
-        35.0f, 36.0f
-    }));
-
-    return result;
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 3> Concat3dDim2TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    bool useSubtensor,
-    float qScale,
-    int32_t qOffset)
-{
-    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
-
-    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
-        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
-
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
-
-        // Batch 0, Channel 1
-        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
-
-        // Batch 0, Channel 2
-        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
-
-        // Batch 1, Channel 0
-        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
-
-        // Batch 1, Channel 1
-        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
-
-        // Batch 1, Channel 2
-        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
-    }));
-
-    return result;
-}
-
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
-LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    float qScale,
-    int32_t qOffset)
-{
-    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
-    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
+        {
             // Batch 0, Channel 0
             1.0f, 2.0f,
 
@@ -969,10 +740,11 @@
 
             // Batch 1, Channel 2
             23.0f, 24.0f
-    }));
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
-    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
+        {
             // Batch 0, Channel 0
             7.0f, 8.0f,
 
@@ -981,10 +753,287 @@
 
             // Batch 0, Channel 2
             11.0f, 12.0f,
-    }));
 
-    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
-    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+            // Batch 1, Channel 0
+            25.0f, 26.0f,
+
+            // Batch 1, Channel 1
+            27.0f, 28.0f,
+
+            // Batch 1, Channel 2
+            29.0f, 30.0f
+        },
+        qScale, qOffset));
+
+    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            13.0f, 14.0f,
+
+            // Batch 0, Channel 1
+            15.0f, 16.0f,
+
+            // Batch 0, Channel 2
+            17.0f, 18.0f,
+
+            // Batch 1, Channel 0
+            31.0f, 32.0f,
+
+            // Batch 1, Channel 1
+            33.0f, 34.0f,
+
+            // Batch 1, Channel 2
+            35.0f, 36.0f
+        },
+        qScale, qOffset));
+
+    LayerTestResult<T, 3> result(outputTensorInfo);
+
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory, memoryManager,
+                   { inputTensorInfo, inputTensorInfo, inputTensorInfo },
+                   { input0.data(), input1.data(), input2.data() },
+                   outputTensorInfo,
+                   output.data(),
+                   dimension,
+                   useSubtensor);
+
+    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
+    return result;
+}
+
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Concat3dDim0TestImpl(
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
+{
+    TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType, qScale, qOffset);
+
+    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
+
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
+
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
+
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
+
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
+
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
+
+            // Batch 1, Channel 2
+            23.0f, 24.0f,
+
+            // Batch 2, Channel 0
+            7.0f, 8.0f,
+
+            // Batch 2, Channel 1
+            9.0f, 10.0f,
+
+            // Batch 2, Channel 2
+            11.0f, 12.0f,
+
+            // Batch 3, Channel 0
+            25.0f, 26.0f,
+
+            // Batch 3, Channel 1
+            27.0f, 28.0f,
+
+            // Batch 3, Channel 2
+            29.0f, 30.0f,
+
+            // Batch 4, Channel 0
+            13.0f, 14.0f,
+
+            // Batch 4, Channel 1
+            15.0f, 16.0f,
+
+            // Batch 4, Channel 2
+            17.0f, 18.0f,
+
+            // Batch 5, Channel 0
+            31.0f, 32.0f,
+
+            // Batch 5, Channel 1
+            33.0f, 34.0f,
+
+            // Batch 5, Channel 2
+            35.0f, 36.0f
+        },
+        qScale, qOffset));
+
+    return result;
+}
+
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Concat3dDim1TestImpl(
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
+{
+    TensorInfo outputTensorInfo({ 2, 9, 2 }, ArmnnType, qScale, qOffset);
+
+    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
+
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
+
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
+
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
+
+            // Batch 0, Channel 3
+            7.0f, 8.0f,
+
+            // Batch 0, Channel 4
+            9.0f, 10.0f,
+
+            // Batch 0, Channel 5
+            11.0f, 12.0f,
+
+            // Batch 0, Channel 6
+            13.0f, 14.0f,
+
+            // Batch 0, Channel 7
+            15.0f, 16.0f,
+
+            // Batch 0, Channel 8
+            17.0f, 18.0f,
+
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
+
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
+
+            // Batch 1, Channel 2
+            23.0f, 24.0f,
+
+            // Batch 1, Channel 3
+            25.0f, 26.0f,
+
+            // Batch 1, Channel 4
+            27.0f, 28.0f,
+
+            // Batch 1, Channel 5
+            29.0f, 30.0f,
+
+            // Batch 1, Channel 6
+            31.0f, 32.0f,
+
+            // Batch 1, Channel 7
+            33.0f, 34.0f,
+
+            // Batch 1, Channel 8
+            35.0f, 36.0f
+        },
+        qScale, qOffset));
+
+    return result;
+}
+
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Concat3dDim2TestImpl(
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    bool useSubtensor,
+    float qScale,
+    int32_t qOffset)
+{
+    TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
+
+    LayerTestResult<T, 3> result = Concat3dTestImpl<ArmnnType>(
+        workloadFactory, memoryManager, outputTensorInfo, 2, useSubtensor, qScale, qOffset);
+
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
+
+            // Batch 0, Channel 1
+            3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
+
+            // Batch 0, Channel 2
+            5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
+
+            // Batch 1, Channel 0
+            19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
+
+            // Batch 1, Channel 1
+            21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
+
+            // Batch 1, Channel 2
+            23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
+        },
+        qScale, qOffset));
+
+    return result;
+}
+
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
+LayerTestResult<T, 3> Concat3dDim0DiffInputDimsTestImpl(
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    float qScale,
+    int32_t qOffset)
+{
+    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType);
+    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
+
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
+
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
+
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
+
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
+
+            // Batch 1, Channel 2
+            23.0f, 24.0f
+        },
+        qScale, qOffset));
+
+    TensorInfo input1TensorInfo({ 1, 3, 2 }, ArmnnType);
+    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            7.0f, 8.0f,
+
+            // Batch 0, Channel 1
+            9.0f, 10.0f,
+
+            // Batch 0, Channel 2
+            11.0f, 12.0f,
+        },
+        qScale, qOffset));
+
+    TensorInfo input2TensorInfo({ 3, 3, 2 }, ArmnnType);
+    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
+        {
             // Batch 0, Channel 0
             25.0f, 26.0f,
 
@@ -1011,9 +1060,10 @@
 
             // Batch 2, Channel 2
             35.0f, 36.0f
-    }));
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
+    TensorInfo outputTensorInfo({ 6, 3, 2 }, ArmnnType);
     LayerTestResult<T, 3> result(outputTensorInfo);
 
     std::vector<T> output;
@@ -1027,130 +1077,138 @@
                    true);
 
     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f,
+            // Batch 1, Channel 2
+            23.0f, 24.0f,
 
-        // Batch 2, Channel 0
-        7.0f, 8.0f,
+            // Batch 2, Channel 0
+            7.0f, 8.0f,
 
-        // Batch 2, Channel 1
-        9.0f, 10.0f,
+            // Batch 2, Channel 1
+            9.0f, 10.0f,
 
-        // Batch 2, Channel 2
-        11.0f, 12.0f,
+            // Batch 2, Channel 2
+            11.0f, 12.0f,
 
-        // Batch 3, Channel 0
-        25.0f, 26.0f,
+            // Batch 3, Channel 0
+            25.0f, 26.0f,
 
-        // Batch 3, Channel 1
-        27.0f, 28.0f,
+            // Batch 3, Channel 1
+            27.0f, 28.0f,
 
-        // Batch 3, Channel 2
-        29.0f, 30.0f,
+            // Batch 3, Channel 2
+            29.0f, 30.0f,
 
-        // Batch 4, Channel 0
-        13.0f, 14.0f,
+            // Batch 4, Channel 0
+            13.0f, 14.0f,
 
-        // Batch 4, Channel 1
-        15.0f, 16.0f,
+            // Batch 4, Channel 1
+            15.0f, 16.0f,
 
-        // Batch 4, Channel 2
-        17.0f, 18.0f,
+            // Batch 4, Channel 2
+            17.0f, 18.0f,
 
-        // Batch 5, Channel 0
-        31.0f, 32.0f,
+            // Batch 5, Channel 0
+            31.0f, 32.0f,
 
-        // Batch 5, Channel 1
-        33.0f, 34.0f,
+            // Batch 5, Channel 1
+            33.0f, 34.0f,
 
-        // Batch 5, Channel 2
-        35.0f, 36.0f
-    }));
+            // Batch 5, Channel 2
+            35.0f, 36.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 3> Concat3dDim1DiffInputDimsTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
-    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f
-    }));
+            // Batch 1, Channel 2
+            23.0f, 24.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
-    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        7.0f, 8.0f,
+    TensorInfo input1TensorInfo({ 2, 4, 2 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            7.0f, 8.0f,
 
-        // Batch 0, Channel 1
-        9.0f, 10.0f,
+            // Batch 0, Channel 1
+            9.0f, 10.0f,
 
-        // Batch 0, Channel 2
-        11.0f, 12.0f,
+            // Batch 0, Channel 2
+            11.0f, 12.0f,
 
-        // Batch 0, Channel 3
-        25.0f, 26.0f,
+            // Batch 0, Channel 3
+            25.0f, 26.0f,
 
-        // Batch 1, Channel 0
-        27.0f, 28.0f,
+            // Batch 1, Channel 0
+            27.0f, 28.0f,
 
-        // Batch 1, Channel 1
-        29.0f, 30.0f,
+            // Batch 1, Channel 1
+            29.0f, 30.0f,
 
-        // Batch 1, Channel 2
-        13.0f, 14.0f,
+            // Batch 1, Channel 2
+            13.0f, 14.0f,
 
-        // Batch 1, Channel 3
-        15.0f, 16.0f,
-    }));
+            // Batch 1, Channel 3
+            15.0f, 16.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
-    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        17.0f, 18.0f,
+    TensorInfo input2TensorInfo({ 2, 1, 2 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            17.0f, 18.0f,
 
-        // Batch 1, Channel 0
-        31.0f, 32.0f,
-    }));
+            // Batch 1, Channel 0
+            31.0f, 32.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 2, 8, 2 }, ArmnnType, qScale, qOffset);
     LayerTestResult<T, 3> result(outputTensorInfo);
 
     std::vector<T> output;
@@ -1164,131 +1222,139 @@
                    true);
 
     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
 
-        // Batch 0, Channel 3
-        7.0f, 8.0f,
+            // Batch 0, Channel 3
+            7.0f, 8.0f,
 
-        // Batch 0, Channel 4
-        9.0f, 10.0f,
+            // Batch 0, Channel 4
+            9.0f, 10.0f,
 
-        // Batch 0, Channel 5
-        11.0f, 12.0f,
+            // Batch 0, Channel 5
+            11.0f, 12.0f,
 
-        // Batch 0, Channel 6
-        25.0f, 26.0f,
+            // Batch 0, Channel 6
+            25.0f, 26.0f,
 
-        // Batch 0, Channel 7
-        17.0f, 18.0f,
+            // Batch 0, Channel 7
+            17.0f, 18.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f,
+            // Batch 1, Channel 2
+            23.0f, 24.0f,
 
-        // Batch 1, Channel 3
-        27.0f, 28.0f,
+            // Batch 1, Channel 3
+            27.0f, 28.0f,
 
-        // Batch 1, Channel 4
-        29.0f, 30.0f,
+            // Batch 1, Channel 4
+            29.0f, 30.0f,
 
-        // Batch 1, Channel 5
-        13.0f, 14.0f,
+            // Batch 1, Channel 5
+            13.0f, 14.0f,
 
-        // Batch 1, Channel 6
-        15.0f, 16.0f,
+            // Batch 1, Channel 6
+            15.0f, 16.0f,
 
-        // Batch 1, Channel 7
-        31.0f, 32.0f,
-    }));
+            // Batch 1, Channel 7
+            31.0f, 32.0f,
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 3> Concat3dDim2DiffInputDimsTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
-    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f,
+    TensorInfo input0TensorInfo({ 2, 3, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f
-    }));
+            // Batch 1, Channel 2
+            23.0f, 24.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
-    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        7.0f,
+    TensorInfo input1TensorInfo({ 2, 3, 1 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            7.0f,
 
-        // Batch 0, Channel 1
-        9.0f,
+            // Batch 0, Channel 1
+            9.0f,
 
-        // Batch 0, Channel 2
-        11.0f,
+            // Batch 0, Channel 2
+            11.0f,
 
-        // Batch 1, Channel 0
-        25.0f,
+            // Batch 1, Channel 0
+            25.0f,
 
-        // Batch 1, Channel 1
-        27.0f,
+            // Batch 1, Channel 1
+            27.0f,
 
-        // Batch 1, Channel 2
-        29.0f
-    }));
+            // Batch 1, Channel 2
+            29.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
-    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        13.0f, 14.0f, 50.0f,
+    TensorInfo input2TensorInfo({ 2, 3, 3 }, ArmnnType, qScale, qOffset);
+    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            13.0f, 14.0f, 50.0f,
 
-        // Batch 0, Channel 1
-        15.0f, 16.0f, 51.0f,
+            // Batch 0, Channel 1
+            15.0f, 16.0f, 51.0f,
 
-        // Batch 0, Channel 2
-        17.0f, 18.0f, 52.0f,
+            // Batch 0, Channel 2
+            17.0f, 18.0f, 52.0f,
 
-        // Batch 1, Channel 0
-        31.0f, 32.0f, 53.0f,
+            // Batch 1, Channel 0
+            31.0f, 32.0f, 53.0f,
 
-        // Batch 1, Channel 1
-        33.0f, 34.0f, 54.0f,
+            // Batch 1, Channel 1
+            33.0f, 34.0f, 54.0f,
 
-        // Batch 1, Channel 2
-        35.0f, 36.0f, 55.0f,
-    }));
+            // Batch 1, Channel 2
+            35.0f, 36.0f, 55.0f,
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 2, 3, 6 }, ArmnnType, qScale, qOffset);
     LayerTestResult<T, 3> result(outputTensorInfo);
 
     std::vector<T> output;
@@ -1302,67 +1368,75 @@
                    useSubtensor);
 
     result.output = MakeTensor<T, 3>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0
+            1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
 
-        // Batch 0, Channel 1
-        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
+            // Batch 0, Channel 1
+            3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
 
-        // Batch 0, Channel 2
-        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
+            // Batch 0, Channel 2
+            5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
 
-        // Batch 1, Channel 0
-        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
+            // Batch 1, Channel 0
+            19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
 
-        // Batch 1, Channel 1
-        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
+            // Batch 1, Channel 1
+            21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
 
-        // Batch 1, Channel 2
-        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
-    }));
+            // Batch 1, Channel 2
+            23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dTestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::TensorInfo& outputTensorInfo,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const TensorInfo& outputTensorInfo,
     unsigned int dimension,
     bool useSubtensor,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f
+        },
+        qScale, qOffset));
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f
-    }));
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
+        {
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f
+        },
+        qScale, qOffset));
 
-    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
+    auto input2 = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
+        {
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f,
+            29.0f, 30.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
 
@@ -1382,197 +1456,209 @@
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDim0TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 0, true, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f,
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f,
 
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
 
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f,
+            29.0f, 30.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
+
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDim1TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 1, 9, 2, 2 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 1, true, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f,
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f,
 
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
 
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f,
+            29.0f, 30.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDim2TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    armnn::TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 1, 3, 6, 2 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 2, true, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        21.0f, 22.0f,
-        23.0f, 24.0f,
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            21.0f, 22.0f,
+            23.0f, 24.0f,
 
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f,
 
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
+             9.0f, 10.0f,
+            11.0f, 12.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
+            29.0f, 30.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDim3TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset,
     bool useSubtensor)
 {
-    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 1, 3, 2, 6 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result = Concat4dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, outputTensorInfo, 3, useSubtensor, qScale, qOffset);
 
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        11.0f, 12.0f,
-        21.0f, 22.0f,
-        3.0f, 4.0f,
-        13.0f, 14.0f,
-        23.0f, 24.0f,
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+            11.0f, 12.0f,
+            21.0f, 22.0f,
+             3.0f,  4.0f,
+            13.0f, 14.0f,
+            23.0f, 24.0f,
 
-        5.0f, 6.0f,
-        15.0f, 16.0f,
-        25.0f, 26.0f,
-        7.0f, 8.0f,
-        17.0f, 18.0f,
-        27.0f, 28.0f,
+             5.0f,  6.0f,
+            15.0f, 16.0f,
+            25.0f, 26.0f,
+             7.0f,  8.0f,
+            17.0f, 18.0f,
+            27.0f, 28.0f,
 
-        9.0f, 10.0f,
-        19.0f, 20.0f,
-        29.0f, 30.0f,
-        11.0f, 12.0f,
-        21.0f, 22.0f,
-        31.0f, 32.0f
-    }));
+             9.0f, 10.0f,
+            19.0f, 20.0f,
+            29.0f, 30.0f,
+            11.0f, 12.0f,
+            21.0f, 22.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDiffShapeDim0TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    unsigned int dimension = 0;
-    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    constexpr unsigned int dimension = 0u;
 
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
+    TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo1({ 2, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
+        {
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
 
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f,
+            29.0f, 30.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
 
-    }));
-
-    armnn::TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 3, 3, 2, 2 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result(outputTensorInfo);
 
@@ -1588,62 +1674,67 @@
                    true);
 
     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f,
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f,
 
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
 
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f,
-        29.0f, 30.0f,
-        31.0f, 32.0f
-    }));
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f,
+            29.0f, 30.0f,
+            31.0f, 32.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDiffShapeDim1TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    unsigned int dimension = 1;
-    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    constexpr unsigned int dimension = 1u;
 
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
+    TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo1({ 1, 2, 2, 2 }, ArmnnType, qScale, qOffset);
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
+        {
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+        },
+        qScale, qOffset));
 
-    }));
-
-    armnn::TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 1, 5, 2, 2 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result(outputTensorInfo);
 
@@ -1659,57 +1750,61 @@
                    true);
 
     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f
-    }));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f,
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDiffShapeDim2TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset)
 {
-    unsigned int dimension = 2;
-    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    constexpr unsigned int dimension = 2u;
 
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
+    TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo1({ 1, 3, 3, 2 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
+        {
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f
+        },
+        qScale, qOffset));
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f
-    }));
-
-    armnn::TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
-
+    TensorInfo outputTensorInfo({ 1, 3, 5, 2 }, ArmnnType, qScale, qOffset);
     LayerTestResult<T, 4> result(outputTensorInfo);
 
     std::vector<T> output;
@@ -1723,64 +1818,69 @@
                    dimension,
                    true);
 
-    result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        11.0f, 12.0f,
-        13.0f, 14.0f,
-        15.0f, 16.0f,
+    result.output         = MakeTensor<T, 4>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+            11.0f, 12.0f,
+            13.0f, 14.0f,
+            15.0f, 16.0f,
 
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        17.0f, 18.0f,
-        19.0f, 20.0f,
-        21.0f, 22.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+            17.0f, 18.0f,
+            19.0f, 20.0f,
+            21.0f, 22.0f,
 
-        9.0f, 10.0f,
-        11.0f, 12.0f,
-        23.0f, 24.0f,
-        25.0f, 26.0f,
-        27.0f, 28.0f
-    }));
+             9.0f, 10.0f,
+            11.0f, 12.0f,
+            23.0f, 24.0f,
+            25.0f, 26.0f,
+            27.0f, 28.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
 LayerTestResult<T, 4> Concat4dDiffShapeDim3TestImpl(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     float qScale,
     int32_t qOffset,
     bool useSubtensor)
 {
-    unsigned int dimension = 3;
-    armnn::TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    constexpr unsigned int dimension = 3u;
 
-    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f,
-        3.0f, 4.0f,
-        5.0f, 6.0f,
-        7.0f, 8.0f,
-        9.0f, 10.0f,
-        11.0f, 12.0f
-    }));
+    TensorInfo inputTensorInfo0({ 1, 3, 2, 2 }, ArmnnType, qScale, qOffset);
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, QuantizedVector<T>(
+        {
+             1.0f,  2.0f,
+             3.0f,  4.0f,
+             5.0f,  6.0f,
+             7.0f,  8.0f,
+             9.0f, 10.0f,
+            11.0f, 12.0f
+        },
+        qScale, qOffset));
 
-    armnn::TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
+    TensorInfo inputTensorInfo1({ 1, 3, 2, 3 }, ArmnnType, qScale, qOffset);
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(
+        {
+            11.0f, 12.0f, 13.0f,
+            14.0f, 15.0f, 16.0f,
 
-    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset, {
-        11.0f, 12.0f, 13.0f,
-        14.0f, 15.0f, 16.0f,
+            17.0f, 18.0f, 19.0f,
+            20.0f, 21.0f, 22.0f,
 
-        17.0f, 18.0f, 19.0f,
-        20.0f, 21.0f, 22.0f,
+            23.0f, 24.0f, 25.0f,
+            26.0f, 27.0f, 28.0f
+        },
+        qScale, qOffset));
 
-        23.0f, 24.0f, 25.0f,
-        26.0f, 27.0f, 28.0f
-    }));
-
-    armnn::TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
+    TensorInfo outputTensorInfo({ 1, 3, 2, 5 }, ArmnnType, qScale, qOffset);
 
     LayerTestResult<T, 4> result(outputTensorInfo);
 
@@ -1796,30 +1896,32 @@
                    useSubtensor);
 
     result.output = MakeTensor<T, 4>(outputTensorInfo, output);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
-        1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
-        3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
-        5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
-        7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
-        9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
-        11.0f, 12.0f, 26.0f, 27.0f, 28.0f
-    }));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
+        {
+            1.0f, 2.0f, 11.0f, 12.0f, 13.0f,
+            3.0f, 4.0f, 14.0f, 15.0f, 16.0f,
+            5.0f, 6.0f, 17.0f, 18.0f, 19.0f,
+            7.0f, 8.0f, 20.0f, 21.0f, 22.0f,
+            9.0f, 10.0f, 23.0f, 24.0f, 25.0f,
+            11.0f, 12.0f, 26.0f, 27.0f, 28.0f
+        },
+        qScale, qOffset));
 
     return result;
 }
 
-template<armnn::DataType ArmnnType, typename T>
+template<DataType ArmnnType, typename T>
 LayerTestResult<T, 3> ConcatDifferentInputOutputQParamTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
     // Defines the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
-    armnn::TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
-    armnn::TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
+    TensorInfo outputTensorInfo({ 3, 6, 3 }, ArmnnType);
+    TensorInfo inputTensorInfo1({ 3, 6, 2 }, ArmnnType);
+    TensorInfo inputTensorInfo2({ 3, 6, 1 }, ArmnnType);
 
-    std::vector<armnn::TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
+    std::vector<TensorShape> inputTensorShapes({inputTensorInfo1.GetShape(), inputTensorInfo2.GetShape()});
 
     // Quantized input1 tensor.
     const float inputScale1 = 0.5f;
@@ -1894,31 +1996,31 @@
     inputTensorInfo2.SetQuantizationOffset(inputOffset2);
 
     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by the size of input[0].
-    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
+    ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = { 0, 0, 2 }; // Extent of the window is defined by the size of input[1].
-    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
+    ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     bool subTensorsSupported = useSubtensor && workloadFactory.SupportsSubTensors();
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+    std::unique_ptr<ITensorHandle> inputHandle1 =
             subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo1);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
+    std::unique_ptr<ITensorHandle> inputHandle2 =
             subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
-    armnn::ConcatQueueDescriptor data;
-    armnn::OriginsDescriptor desc = armnn::CreateDescriptorForConcatenation(
+    ConcatQueueDescriptor data;
+    OriginsDescriptor desc = CreateDescriptorForConcatenation(
             inputTensorShapes.begin(),inputTensorShapes.end(), 2);
     data.m_Parameters = desc;
 
-    armnn::WorkloadInfo info;
+    WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -1926,7 +2028,7 @@
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -1947,16 +2049,16 @@
 // Explicit template specializations
 //
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 3>
-ConcatDifferentInputOutputQParamTest<armnn::DataType::QuantisedAsymm8>(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+template LayerTestResult<ResolveType<DataType::QuantisedAsymm8>, 3>
+ConcatDifferentInputOutputQParamTest<DataType::QuantisedAsymm8>(
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor);
 
-template LayerTestResult<armnn::ResolveType<armnn::DataType::QuantisedSymm16>, 3>
-ConcatDifferentInputOutputQParamTest<armnn::DataType::QuantisedSymm16>(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+template LayerTestResult<ResolveType<DataType::QuantisedSymm16>, 3>
+ConcatDifferentInputOutputQParamTest<DataType::QuantisedSymm16>(
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor);
 
 //
@@ -1964,8 +2066,8 @@
 //
 
 LayerTestResult<float,3> ConcatTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
@@ -1980,9 +2082,9 @@
     unsigned int inputChannels2 = 1;
 
     // Define the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
-    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
-    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
+    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::Float32);
+    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::Float32);
+    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::Float32);
 
     LayerTestResult<float,3> ret(outputTensorInfo);
 
@@ -2041,27 +2143,27 @@
     );
 
     std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by the size of input[0].
-    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
+    ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by the size of input[1].
-    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
+    ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+    std::unique_ptr<ITensorHandle> inputHandle1 =
         subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo1);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2  =
+    std::unique_ptr<ITensorHandle> inputHandle2  =
         subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
-    armnn::ConcatQueueDescriptor data;
-    armnn::WorkloadInfo info;
+    ConcatQueueDescriptor data;
+    WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -2069,7 +2171,7 @@
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -2087,156 +2189,156 @@
 }
 
 LayerTestResult<float, 1> Concat1dTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat1dTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat1dTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 2> Concat2dDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat2dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 2> Concat2dDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat2dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 2> Concat2dDim0DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat2dDim0DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 2> Concat2dDim1DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat2dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat3dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat3dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim2Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat3dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
+    return Concat3dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim0DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim0DiffInputDimsTestImpl<armnn::DataType::Float32>(
+    return Concat3dDim0DiffInputDimsTestImpl<DataType::Float32>(
         workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim1DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1DiffInputDimsTestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat3dDim1DiffInputDimsTestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 3> Concat3dDim2DiffInputDimsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat3dDim2DiffInputDimsTestImpl<armnn::DataType::Float32>(
+    return Concat3dDim2DiffInputDimsTestImpl<DataType::Float32>(
         workloadFactory, memoryManager, useSubtensor, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat4dDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim1TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat4dDim1TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDim2Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat4dDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDim3Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat4dDim3TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
+    return Concat4dDim3TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
 }
 
 LayerTestResult<float, 4> Concat4dDiffShapeDim0Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim0TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat4dDiffShapeDim0TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDiffShapeDim1Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim1TestImpl<armnn::DataType::Float32>(
+    return Concat4dDiffShapeDim1TestImpl<DataType::Float32>(
         workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDiffShapeDim2Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim2TestImpl<armnn::DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat4dDiffShapeDim2TestImpl<DataType::Float32>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<float, 4> Concat4dDiffShapeDim3Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat4dDiffShapeDim3TestImpl<armnn::DataType::Float32>(
+    return Concat4dDiffShapeDim3TestImpl<DataType::Float32>(
         workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
 }
 
-LayerTestResult<armnn::Half, 3> ConcatFloat16Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+LayerTestResult<Half, 3> ConcatFloat16Test(
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1TestImpl<armnn::DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
+    return Concat3dDim1TestImpl<DataType::Float16>(workloadFactory, memoryManager, 0.0f, 0);
 }
 
 LayerTestResult<uint8_t, 3> ConcatUint8DifferentQParamsTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
@@ -2251,9 +2353,9 @@
     unsigned int inputChannels2 = 1;
 
     // Defines the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
+    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
+    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
+    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
 
     // Quantized input1 tensor. Range [-3, 1]
     const float inputScale1 = 0.015686f;
@@ -2332,27 +2434,27 @@
     inputTensorInfo2.SetQuantizationOffset(inputOffset2);
 
     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
-    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
+    ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
-    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
+    ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+    std::unique_ptr<ITensorHandle> inputHandle1 =
             subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo1);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
+    std::unique_ptr<ITensorHandle> inputHandle2 =
             subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
-    armnn::ConcatQueueDescriptor data;
-    armnn::WorkloadInfo info;
+    ConcatQueueDescriptor data;
+    WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -2360,7 +2462,7 @@
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -2378,8 +2480,8 @@
 }
 
 LayerTestResult<uint8_t, 3> ConcatUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
@@ -2394,9 +2496,9 @@
     unsigned int inputChannels2 = 1;
 
     // Defines the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
-    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
+    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedAsymm8);
+    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedAsymm8);
+    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedAsymm8);
 
     // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
     const float scale = 0.13497836f;
@@ -2466,29 +2568,29 @@
     );
 
     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
-    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
+    ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
-    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
+    ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+    std::unique_ptr<ITensorHandle> inputHandle1 =
         subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo1);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
+    std::unique_ptr<ITensorHandle> inputHandle2 =
         subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
 
-    armnn::ConcatQueueDescriptor data;
-    armnn::WorkloadInfo info;
+    ConcatQueueDescriptor data;
+    WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -2496,7 +2598,7 @@
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -2514,8 +2616,8 @@
 }
 
 LayerTestResult<uint16_t, 3> ConcatUint16Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+        IWorkloadFactory& workloadFactory,
+        const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
     unsigned int outputWidth = 3;
     unsigned int outputHeight = 6;
@@ -2530,9 +2632,9 @@
     unsigned int inputChannels2 = 1;
 
     // Defines the tensor descriptors.
-    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedSymm16);
-    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedSymm16);
-    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedSymm16);
+    TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, DataType::QuantisedSymm16);
+    TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, DataType::QuantisedSymm16);
+    TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, DataType::QuantisedSymm16);
 
     // Arbitrary scale and offsets. They don't really matter as the Concat operator doesn't dequantize/quantize them.
     const float scale = 0.13497836f;
@@ -2599,29 +2701,29 @@
     }));
 
     std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; //Extent of the window is defined by size of input[0].
-    armnn::ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
+    ConcatQueueDescriptor::ViewOrigin window1(wOrigin1);
 
     std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; //Extent of the window is defined by size of input[1].
-    armnn::ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
+    ConcatQueueDescriptor::ViewOrigin window2(wOrigin2);
 
 
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     bool subTensorsSupported = workloadFactory.SupportsSubTensors();
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+    std::unique_ptr<ITensorHandle> inputHandle1 =
             subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo1);
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
+    std::unique_ptr<ITensorHandle> inputHandle2 =
             subTensorsSupported ?
             workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
             workloadFactory.CreateTensorHandle(inputTensorInfo2);
 
 
-    armnn::ConcatQueueDescriptor data;
-    armnn::WorkloadInfo info;
+    ConcatQueueDescriptor data;
+    WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
     AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
     AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
@@ -2629,7 +2731,7 @@
     data.m_ViewOrigins.push_back(window1);
     data.m_ViewOrigins.push_back(window2);
 
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConcat(data, info);
+    std::unique_ptr<IWorkload> workload = workloadFactory.CreateConcat(data, info);
 
     inputHandle1->Allocate();
     inputHandle2->Allocate();
@@ -2647,147 +2749,147 @@
 }
 
 LayerTestResult<uint8_t, 1> Concat1dUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat1dTestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat1dTestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 2> Concat2dDim0Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat2dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 2> Concat2dDim1Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat2dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 2> Concat2dDim0DiffInputDimsUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim0DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat2dDim0DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 2> Concat2dDim1DiffInputDimsUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat2dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat2dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim0Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat3dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim1Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat3dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim2Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat3dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat3dDim2TestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim0DiffInputDimsUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat3dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim1DiffInputDimsUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat3dDim1DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat3dDim1DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 3> Concat3dDim2DiffInputDimsUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat3dDim2DiffInputDimsTestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat3dDim2DiffInputDimsTestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, useSubtensor, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDim0Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim0TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat4dDim0TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDim1Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim1TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat4dDim1TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDim2Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDim2TestImpl<armnn::DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
+    return Concat4dDim2TestImpl<DataType::QuantisedAsymm8>(workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDim3Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager, bool useSubtensor)
 {
-    return Concat4dDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat4dDim3TestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim0Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim0TestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat4dDiffShapeDim0TestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim1Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim1TestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat4dDiffShapeDim1TestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim2Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
 {
-    return Concat4dDiffShapeDim2TestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat4dDiffShapeDim2TestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1);
 }
 
 LayerTestResult<uint8_t, 4> Concat4dDiffShapeDim3Uint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    IWorkloadFactory& workloadFactory,
+    const IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
     bool useSubtensor)
 {
-    return Concat4dDiffShapeDim3TestImpl<armnn::DataType::QuantisedAsymm8>(
+    return Concat4dDiffShapeDim3TestImpl<DataType::QuantisedAsymm8>(
         workloadFactory, memoryManager, 0.5f, -1, useSubtensor);
 }
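Note: every call site above now passes the source values first, with the quantisation scale and offset following, and the helper is referenced through the armnnUtils namespace. A minimal sketch of the reordered call, assuming <QuantizeHelper.hpp> is on the include path and that armnnUtils::QuantizedVector<T> accepts a std::vector<float> of reference values and returns std::vector<T>; the wrapper function name below is hypothetical and only for illustration:

    #include <QuantizeHelper.hpp>

    #include <cstdint>
    #include <vector>

    // Builds a small quantised input the same way the updated Uint8 Concat tests do:
    // reference values first, then qScale and qOffset.
    std::vector<uint8_t> MakeQuantisedInput()
    {
        const float   qScale  = 0.5f; // matches the scale used by the Uint8 tests above
        const int32_t qOffset = -1;   // matches the offset used by the Uint8 tests above

        return armnnUtils::QuantizedVector<uint8_t>({ 1.0f, 2.0f, 3.0f }, qScale, qOffset);
    }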
diff --git a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
index c3cacd5..3f22c31 100644
--- a/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConstantTestImpl.cpp
@@ -6,6 +6,7 @@
 #include "ConstantTestImpl.hpp"
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -53,43 +54,45 @@
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
-        // Batch 0, Channel 0
-        235.0f,  46.0f, 178.0f,
-        100.0f, 123.0f,  19.0f,
-        172.0f,  74.0f, 250.0f,
-          6.0f, 195.0f,  80.0f,
+        armnnUtils::QuantizedVector<T>(
+            {
+                // Batch 0, Channel 0
+                235.0f,  46.0f, 178.0f,
+                100.0f, 123.0f,  19.0f,
+                172.0f,  74.0f, 250.0f,
+                  6.0f, 195.0f,  80.0f,
 
-        // Batch 0, Channel 1
-        113.0f,  95.0f, 202.0f,
-         77.0f, 114.0f,  71.0f,
-        122.0f, 246.0f, 166.0f,
-         82.0f,  28.0f,  37.0f,
+                // Batch 0, Channel 1
+                113.0f,  95.0f, 202.0f,
+                 77.0f, 114.0f,  71.0f,
+                122.0f, 246.0f, 166.0f,
+                 82.0f,  28.0f,  37.0f,
 
-        // Batch 0, Channel 2
-         56.0f, 170.0f, 162.0f,
-        194.0f,  89.0f, 254.0f,
-         12.0f, 209.0f, 200.0f,
-          1.0f,  64.0f,  54.0f,
+                // Batch 0, Channel 2
+                 56.0f, 170.0f, 162.0f,
+                194.0f,  89.0f, 254.0f,
+                 12.0f, 209.0f, 200.0f,
+                  1.0f,  64.0f,  54.0f,
 
-        // Batch 1, Channel 0
-         67.0f,  90.0f,  49.0f,
-          7.0f, 163.0f,  18.0f,
-         25.0f, 117.0f, 103.0f,
-        247.0f,  59.0f, 189.0f,
+                // Batch 1, Channel 0
+                 67.0f,  90.0f,  49.0f,
+                  7.0f, 163.0f,  18.0f,
+                 25.0f, 117.0f, 103.0f,
+                247.0f,  59.0f, 189.0f,
 
-        // Batch 1, Channel 1
-        239.0f, 104.0f, 199.0f,
-         17.0f, 124.0f, 153.0f,
-        222.0f, 217.0f, 75.0f,
-         32.0f, 126.0f, 21.0f,
+                // Batch 1, Channel 1
+                239.0f, 104.0f, 199.0f,
+                 17.0f, 124.0f, 153.0f,
+                222.0f, 217.0f, 75.0f,
+                 32.0f, 126.0f, 21.0f,
 
-        // Batch 1, Channel 2
-         97.0f, 145.0f, 215.0f,
-        115.0f, 116.0f, 238.0f,
-        226.0f,  16.0f, 132.0f,
-         92.0f, 125.0f,  88.0f,
-    })));
+                // Batch 1, Channel 2
+                 97.0f, 145.0f, 215.0f,
+                115.0f, 116.0f, 238.0f,
+                226.0f,  16.0f, 132.0f,
+                 92.0f, 125.0f,  88.0f,
+            },
+            qScale, qOffset)));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = input;
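The Conv2d test updates that follow quantise their reference data with the scale and offset read back from the relevant armnn::TensorInfo rather than with literal values. A minimal sketch of that pattern, assuming the TensorInfo accessors used in the hunks below (GetQuantizationScale/GetQuantizationOffset); the function name and tensor values are hypothetical:

    #include <QuantizeHelper.hpp>

    #include <armnn/ArmNN.hpp>

    #include <cstdint>
    #include <vector>

    // Quantises a reference output with whatever parameters the output tensor carries,
    // mirroring how the updated Conv2d tests build their expected data.
    std::vector<int16_t> MakeQuantisedExpectedOutput()
    {
        armnn::TensorInfo outputInfo({ 1, 1, 2, 2 }, armnn::DataType::QuantisedSymm16);
        outputInfo.SetQuantizationScale(0.25f);
        outputInfo.SetQuantizationOffset(0);

        return armnnUtils::QuantizedVector<int16_t>(
            { 1.0f, 2.0f, 3.0f, 4.0f },
            outputInfo.GetQuantizationScale(),
            outputInfo.GetQuantizationOffset());
    }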
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 01c1b18..198904e 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -7,13 +7,13 @@
 
 #include <DataLayoutIndexed.hpp>
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <TensorUtils.hpp>
 
 #include <armnn/ArmNN.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -62,6 +62,8 @@
     -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
 });
 
+using namespace armnnUtils;
+
 //
 // Helper templates
 //
@@ -73,7 +75,7 @@
     if(biasEnabled)
     {
         armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, ArmnnType);
-        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias2));
+        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(Bias2, qScale, 0.0f));
         return bias;
     }
     else
@@ -89,7 +91,7 @@
     if(biasEnabled)
     {
         armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias4.size())}, ArmnnType);
-        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias4));
+        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(Bias4, qScale, 0.0f));
         return bias;
     }
     else
@@ -105,7 +107,7 @@
     if(biasEnabled)
     {
         armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias4.size())}, ArmnnType);
-        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, 0.0f, Bias8));
+        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(Bias8, qScale, 0.0f));
         return bias;
     }
     else
@@ -492,35 +494,39 @@
         biasInfo.SetQuantizationOffset(0);
     }
 
-    std::vector<T> inputData(
-        QuantizedVector<T>(inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), {
-            5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
-            -3.0f, 3.2f, 5.0f, 2.0f, 3.0f,
-        }));
+    std::vector<T> inputData = QuantizedVector<T>(
+        {
+             5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
+            -3.0f,  3.2f, 5.0f, 2.0f, 3.0f,
+        },
+        inputInfo.GetQuantizationScale(),
+        inputInfo.GetQuantizationOffset());
 
-    std::vector<T> kernelData(
-        QuantizedVector<T>(kernelInfo.GetQuantizationScale(), kernelInfo.GetQuantizationOffset(), {
-            1.0f, 0.0f, 0.0f,
-            0.0f, 2.0f, -1.5f,
+    std::vector<T> kernelData = QuantizedVector<T>(
+        {
+            1.0f,  0.0f,  0.0f,
+            0.0f,  2.0f, -1.5f,
 
-            0.0f, 0.0f, 0.0f,
-            0.2f, 0.2f, 0.2f,
+            0.0f,  0.0f,  0.0f,
+            0.2f,  0.2f,  0.2f,
 
-            0.5f, 0.0f, 0.5f,
-            0.0f, -1.0f, 0.0f
-        }));
+            0.5f,  0.0f,  0.5f,
+            0.0f, -1.0f,  0.0f
+        },
+        kernelInfo.GetQuantizationScale(),
+        kernelInfo.GetQuantizationOffset());
 
-    std::vector<B> biasData(
-        QuantizedVector<B>(biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(), {
-            1.0f, 0.0f, 0.0f
-        }));
+    std::vector<B> biasData =
+        QuantizedVector<B>({ 1.0f, 0.0f, 0.0f }, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset());
 
-    std::vector<T> outputData(
-        QuantizedVector<T>(outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), {
-            4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
+    std::vector<T> outputData = QuantizedVector<T>(
+        {
+             4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f - 3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
             -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f,
-            2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
-        }));
+             2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
+        },
+        outputInfo.GetQuantizationScale(),
+        outputInfo.GetQuantizationOffset());
 
     // Optionally apply bias to output image.
     if(biasEnabled)
@@ -698,54 +704,55 @@
 {
     // Use common single-batch 3-channel 16x8 image.
     armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
-    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset));
 
     // Use a 2-element batch with 3-channel 3x5 kernels.
     armnn::TensorInfo kernelDesc({2, 3, 5, 3}, ArmnnType);
     boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
-            1, 1, 1,
+        QuantizedVector<T>({
+            1,  1, 1,
             1, -1, 1,
-            1, 1, 1,
-            1, 1, 1,
-            1, 1, 1,
+            1,  1, 1,
+            1,  1, 1,
+            1,  1, 1,
 
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
 
-            2, 2, 2,
-            2, 2, 2,
-            2, 2, 2,
-            2, 2, 2,
-            2, 2, 2,
+            2,  2, 2,
+            2,  2, 2,
+            2,  2, 2,
+            2,  2, 2,
+            2,  2, 2,
 
 
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
 
-            1, 1, 1,
-            1, 1, 1,
-            1, 1, 1,
-            1, 1, 1,
-            1, 1, 1,
+            1,  1, 1,
+            1,  1, 1,
+            1,  1, 1,
+            1,  1, 1,
+            1,  1, 1,
 
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0
-        })));
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0
+        },
+        qScale, qOffset)));
 
     // Expected output is 2 batch elements of a 1-channel 14x4 image.
     armnn::TensorInfo outputDesc({1, 2, 4, 14}, ArmnnType);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
             -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
             -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
@@ -757,7 +764,8 @@
             5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-        })));
+        },
+        qScale, qOffset)));
 
     return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -785,42 +793,43 @@
 
     // Use common single-batch 3-channel 16x8 image.
     armnn::TensorInfo inputDesc({1, 3, 8, 16}, ArmnnType);
-    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(ConvInput3x8x16, qScale, qOffset));
 
     // Use a 2-element batch of 3-channel 3x3 kernels.
     armnn::TensorInfo kernelDesc({2, 3, 3, 3}, ArmnnType);
     boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
-            1, 1, 1,
+        QuantizedVector<T>({
+            1,  1, 1,
             1, -1, 1,
-            1, 1, 1,
+            1,  1, 1,
 
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
 
-            2, 2, 2,
-            2, 2, 2,
-            2, 2, 2,
+            2,  2, 2,
+            2,  2, 2,
+            2,  2, 2,
 
 
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0,
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0,
 
-            1, 1, 1,
-            1, 1, 1,
-            1, 1, 1,
+            1,  1, 1,
+            1,  1, 1,
+            1,  1, 1,
 
-            0, 0, 0,
-            0, 0, 0,
-            0, 0, 0
-        })));
+            0,  0, 0,
+            0,  0, 0,
+            0,  0, 0
+        },
+        qScale, qOffset)));
 
     // Expected output is 1 batch of a 2-channel 14x6 image.
     armnn::TensorInfo outputDesc({1, 2, 6, 14}, ArmnnType);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
             -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
             -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
@@ -834,7 +843,8 @@
             3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
             3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-        })));
+        },
+        qScale, qOffset)));
 
     return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -860,19 +870,21 @@
     // Use a single-batch 1-channel 3x3 image as input.
     armnn::TensorInfo inputDesc({1, 1, 3, 3}, ArmnnType);
     boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             11,21,31,
             12,22,32,
             13,23,33
-        })));
+        },
+        qScale, qOffset)));
 
     // Use 1 batch of a 1-channel 2x2 kernel.
     armnn::TensorInfo kernelDesc({1, 1, 2, 2}, ArmnnType);
     boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -11,-21,
             -12,-22,
-        })));
+        },
+        qScale, qOffset)));
 
 // Expected output is 1 batch of a 1-channel 6x8 image.
 // Manually calculated like this:
@@ -885,7 +897,7 @@
 //[..... .....  ..... .....  ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; .....  ..... .....  ..... ..]
     armnn::TensorInfo outputDesc({1, 1, 8, 6}, ArmnnType);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
                0,    0,      0,    0,    0,    0,
             -242,  -594,  -934, -372,    0,    0,
             -495, -1190, -1850, -725,    0,    0,
@@ -894,7 +906,8 @@
                0,    0,     0,     0,    0,    0,
                0,    0,     0,     0,    0,    0,
                0,    0,     0,     0,    0,    0
-        })));
+        },
+        qScale, qOffset)));
 
     return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -924,35 +937,37 @@
     // Use a single-batch 1-channel 5x5 image as input.
     armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, ArmnnType);
     boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             11,21,31,41,51,
             12,22,32,42,52,
             13,23,33,43,53,
             14,24,34,44,54,
             15,25,35,45,55,
-        })));
+        }, qScale, qOffset)));
 
     // Use 1 batch of a 1-channel 4x4 kernel.
     armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, ArmnnType);
     boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -11,-21,-31,-41,
             -12,-22,-32,-42,
             -13,-23,-33,-43,
             -14,-24,-34,-44,
-        })));
+        },
+        qScale, qOffset)));
 
     // Expected output is 1 batch of a 1-channel 5x5 image.
     armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, ArmnnType);
     std::vector<T> myVec(outputDesc.GetNumElements(), 0);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -7140, -10580, -13940,  -9300, -5230,
             -9590, -14120, -18520, -12290, -6860,
             -9980, -14560, -18960, -12560, -7000,
             -7518, -10904, -14144,  -9318, -5152,
             -5032,  -7256,  -9376,  -6142, -3368,
-        })));
+        },
+        qScale, qOffset)));
 
     return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -1025,17 +1040,18 @@
     outputTensorInfo.SetQuantizationOffset(qOffset);
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                    inputTensorInfo.GetQuantizationOffset(),
-                                                                    inputNoQuantizedValues)));
+                                  std::vector<T>(QuantizedVector<T>(inputNoQuantizedValues,
+                                                                    inputTensorInfo.GetQuantizationScale(),
+                                                                    inputTensorInfo.GetQuantizationOffset())));
     auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
-                                  std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
-                                                                    kernelTensorInfo.GetQuantizationOffset(),
-                                                                    kernelNoQuantizedValues)));
-    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
-                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                             outputTensorInfo.GetQuantizationOffset(),
-                                                                             outputExpectedNoQuantizedValues)));
+                                  std::vector<T>(QuantizedVector<T>(kernelNoQuantizedValues,
+                                                                    kernelTensorInfo.GetQuantizationScale(),
+                                                                    kernelTensorInfo.GetQuantizationOffset())));
+    auto expectedOutput =
+        MakeTensor<T, 4>(outputTensorInfo,
+                         std::vector<T>(QuantizedVector<T>(outputExpectedNoQuantizedValues,
+                                                           outputTensorInfo.GetQuantizationScale(),
+                                                           outputTensorInfo.GetQuantizationOffset())));
 
     return SimpleConvolution2dTestImpl<ArmnnType, ArmnnBType>(
             workloadFactory,
@@ -1539,15 +1555,18 @@
         biasDesc.SetQuantizationOffset(0);
     }
     std::vector<T> inputData = std::vector<T>(
-            QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
-                    1.f, 2.f, 1.f,
-                    2.f, 1.f, 2.f,
-                    1.f, 2.f, 1.f,
+            QuantizedVector<T>({
+                1.f, 2.f, 1.f,
+                2.f, 1.f, 2.f,
+                1.f, 2.f, 1.f,
 
-                    1.f, 2.f, 1.f,
-                    2.f, 1.f, 2.f,
-                    1.f, 2.f, 1.f,
-            }));
+                1.f, 2.f, 1.f,
+                2.f, 1.f, 2.f,
+                1.f, 2.f, 1.f,
+            },
+            inputTensorInfo.GetQuantizationScale(),
+            inputTensorInfo.GetQuantizationOffset()));
+
     // at this point if we require it permute the input data
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
     if (layout == armnn::DataLayout::NHWC)
@@ -1558,27 +1577,32 @@
     }
     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
 
-    std::vector<B> biasV(QuantizedVector<B>(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
-                                            {0, 2}));
+    std::vector<B> biasV(QuantizedVector<B>({ 0, 2 },
+                                            biasDesc.GetQuantizationScale(),
+                                            biasDesc.GetQuantizationOffset()));
+
     auto bias = MakeTensor<B, 1>(biasDesc, biasV);
 
     std::vector<T> kernelData = std::vector<T>(
-            QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), {
-                    1.f, 0.f,  1.f,
-                    0.f, 0.f,  0.f,
-                    -1.f, 0.f, -1.f,
+            QuantizedVector<T>({
+                 1.f, 0.f,  1.f,
+                 0.f, 0.f,  0.f,
+                -1.f, 0.f, -1.f,
 
-                    1.f, 0.f,  1.f,
-                    0.f, 0.f,  0.f,
-                    -1.f, 0.f, -1.f,
-            }));
+                 1.f, 0.f,  1.f,
+                 0.f, 0.f,  0.f,
+                -1.f, 0.f, -1.f,
+            },
+            kernelDesc.GetQuantizationScale(),
+            kernelDesc.GetQuantizationOffset()));
+
     auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);
 
     // Manually calculated.
     std::vector<T> outputImage(
-        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                           outputTensorInfo.GetQuantizationOffset(),
-                           {0.f, 0.f})
+        QuantizedVector<T>({ 0.f, 0.f },
+                           outputTensorInfo.GetQuantizationScale(),
+                           outputTensorInfo.GetQuantizationOffset())
     );
 
     // Optionally apply bias to output image.
@@ -1686,24 +1710,27 @@
 
     // NOTE: originalInputData is in NCHW format
     std::vector<T> originalInputData = std::vector<T>(
-            QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-            }));
+            QuantizedVector<T>({
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f
+            },
+            inputTensorInfo.GetQuantizationScale(),
+            inputTensorInfo.GetQuantizationOffset()));
+
     std::vector<T> inputData = originalInputData;
     // at this point if we require it permute the input data
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
@@ -1714,70 +1741,76 @@
     }
     auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
 
-    std::vector<B> biasV(QuantizedVector<B>(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
-        {0, 2, 1, -1}));
+    std::vector<B> biasV = QuantizedVector<B>({ 0, 2, 1, -1 },
+                                              biasDesc.GetQuantizationScale(),
+                                              biasDesc.GetQuantizationOffset());
+
     auto bias = MakeTensor<B, 1>(biasDesc, biasV);
 
     std::vector<T> kernelData = std::vector<T>(
-            QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), {
-                    1, 1, 1,
-                    1, -1, 1,
-                    1, 1, 1,
-                    1, 1, 1,
-                    1, 1, 1,
+            QuantizedVector<T>({
+                1,  1, 1,
+                1, -1, 1,
+                1,  1, 1,
+                1,  1, 1,
+                1,  1, 1,
 
-                    2, 2, 2,
-                    2, 2, 2,
-                    2, 2, 2,
-                    2, 2, 2,
-                    2, 2, 2,
+                2,  2, 2,
+                2,  2, 2,
+                2,  2, 2,
+                2,  2, 2,
+                2,  2, 2,
 
-                    0, 0, 0,
-                    0, -1, 0,
-                    0, 0, 0,
-                    0, 0, 0,
-                    0, 0, 0,
+                0,  0, 0,
+                0, -1, 0,
+                0,  0, 0,
+                0,  0, 0,
+                0,  0, 0,
 
-                    0, 0, 0,
-                    0, 0, 0,
-                    0, 1, 0,
-                    0, 0, 0,
-                    0, 0, 0
+                0,  0, 0,
+                0,  0, 0,
+                0,  1, 0,
+                0,  0, 0,
+                0,  0, 0
+            },
+            kernelDesc.GetQuantizationScale(),
+            kernelDesc.GetQuantizationOffset()));
 
-            }));
     auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);
 
     // Manually calculated.
     std::vector<T> originalOutputImage = std::vector<T>(
-        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
-            3.5f,  3.5f,  3.5f,  3.5f,  3.5f,  3.5f,  3.5f,
-            6.0f,  6.0f,  6.0f,  6.0f,  6.0f,  6.0f,  6.0f,
-            5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,
-            6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,
-            6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,
-            5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,
+        QuantizedVector<T>({
+             3.5f,  3.5f,  3.5f,  3.5f,  3.5f,  3.5f,  3.5f,
+             6.0f,  6.0f,  6.0f,  6.0f,  6.0f,  6.0f,  6.0f,
+             5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,
+             6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,
+             6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,
+             5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,
 
             -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
             -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
             -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
             -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
 
-            8.0f,  8.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             8.0f,  8.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             10.0f, 10.0f, 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             10.0f, 10.0f, 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             10.0f, 10.0f, 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
             10.0f, 10.0f, 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-            8.0f,  8.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             8.0f,  8.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
 
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
-            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f
-        }));
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+             0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f
+        },
+        outputTensorInfo.GetQuantizationScale(),
+        outputTensorInfo.GetQuantizationOffset()));
 
     // Optionally apply bias to output image.
     if(biasEnabled)
@@ -2016,8 +2049,7 @@
     // Use a single-batch 2-channel 5x5 image as input.
     armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
-        {
+        QuantizedVector<T>({
              0,  1,  2,  3,  4,
              5,  6,  7,  8,  9,
             10, 11, 12, 13, 14,
@@ -2029,13 +2061,14 @@
             35, 36, 37, 38, 39,
             40, 41, 42, 43, 44,
             45, 46, 47, 48, 49
-        })));
+        },
+        inputTensorInfo.GetQuantizationScale(),
+        inputTensorInfo.GetQuantizationOffset())));
 
     // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
     armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
     auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
-        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
-        {
+        QuantizedVector<T>({
             32, 31, 30, 29,
             28, 27, 26, 25,
             24, 23, 22, 21,
@@ -2045,14 +2078,15 @@
             12, 11, 10,  9,
              8,  7,  6,  5,
              4,  3,  2,  1
-        })));
+        },
+        kernelTensorInfo.GetQuantizationScale(),
+        kernelTensorInfo.GetQuantizationOffset())));
 
     // Expected output is 1 batch of a 2-channel 5x5 image.
     // Calculated using the python tensorflow library with strideX=1, strideY=1.
     armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, ArmnnType);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
-        {
+        QuantizedVector<T>({
             1062, 1580, 1850, 1530, 1117,
             2140, 3108, 3500, 2842, 2042,
             3580, 5068, 5460, 4342, 3062,
@@ -2064,7 +2098,9 @@
             3390, 4886, 5022, 4068, 2916,
             3566, 5056, 5182, 4133, 2922,
             3100, 4352, 4452, 3517, 2465
-        })));
+        },
+        outputTensorInfo.GetQuantizationScale(),
+        outputTensorInfo.GetQuantizationOffset())));
 
     return DepthwiseConvolution2dAsymmetricTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -2097,8 +2133,7 @@
 
     armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
-        {
+        QuantizedVector<T>({
              0,  1,  2,  3,  4,
              5,  6,  7,  8,  9,
             10, 11, 12, 13, 14,
@@ -2110,12 +2145,13 @@
             35, 36, 37, 38, 39,
             40, 41, 42, 43, 44,
             45, 46, 47, 48, 49
-        })));
+        },
+        inputTensorInfo.GetQuantizationScale(),
+        inputTensorInfo.GetQuantizationOffset())));
 
     armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, ArmnnType);
     auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
-        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
-        {
+        QuantizedVector<T>({
              32, 31, 30, 29,
              28, 27, 26, 25,
              24, 23, 22, 21,
@@ -2125,12 +2161,13 @@
              12, 11, 10,  9,
               8,  7,  6,  5,
               4,  3,  2,  1
-        })));
+        },
+        kernelTensorInfo.GetQuantizationScale(),
+        kernelTensorInfo.GetQuantizationOffset())));
 
     armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5}, ArmnnType);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
-        {
+        QuantizedVector<T>({
             1062, 1580, 1850, 1530, 1117,
             2140, 3108, 3500, 2842, 2042,
             3580, 5068, 5460, 4342, 3062,
@@ -2142,7 +2179,9 @@
             3390, 4886, 5022, 4068, 2916,
             3566, 5056, 5182, 4133, 2922,
             3100, 4352, 4452, 3517, 2465
-        })));
+        },
+        outputTensorInfo.GetQuantizationScale(),
+        outputTensorInfo.GetQuantizationOffset())));
 
     return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -2175,27 +2214,29 @@
 
     armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9}, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(),
-        {
-             0, 0, 0, 0, 0, 0, 0, 0, 0,
-             0, 0, 0, 0, 0, 0, 0, 0, 0,
-             0, 0, 0, 0, 0, 0, 0, 0, 0,
-             0, 0, 0, 1, 1, 1, 0, 0, 0,
-             0, 0, 0, 1, 1, 1, 0, 0, 0,
-             0, 0, 0, 1, 1, 1, 0, 0, 0,
-             0, 0, 0, 0, 0, 0, 0, 0, 0,
-             0, 0, 0, 0, 0, 0, 0, 0, 0,
-             0, 0, 0, 0, 0, 0, 0, 0, 0
-        })));
+        QuantizedVector<T>({
+            0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 1, 1, 1, 0, 0, 0,
+            0, 0, 0, 1, 1, 1, 0, 0, 0,
+            0, 0, 0, 1, 1, 1, 0, 0, 0,
+            0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0, 0, 0, 0
+        },
+        inputTensorInfo.GetQuantizationScale(),
+        inputTensorInfo.GetQuantizationOffset())));
 
     armnn::TensorInfo kernelTensorInfo({ 1, 1, 3, 3}, ArmnnType);
     auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
-        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(),
-        {
-             1, 2, 3,
-             4, 5, 6,
-             7, 8, 9
-        })));
+        QuantizedVector<T>({
+            1, 2, 3,
+            4, 5, 6,
+            7, 8, 9
+        },
+        kernelTensorInfo.GetQuantizationScale(),
+        kernelTensorInfo.GetQuantizationOffset())));
 
     uint32_t padLeft = 0;
     uint32_t padTop = 0;
@@ -2209,12 +2250,13 @@
     // Since the dilation rate is 3 this will reduce the size of the output from 9x9 to 3x3 of all 5s.
     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3}, ArmnnType);
     boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
-        {
-             5, 5, 5,
-             5, 5, 5,
-             5, 5, 5
-        })));
+        QuantizedVector<T>({
+            5, 5, 5,
+            5, 5, 5,
+            5, 5, 5
+        },
+        outputTensorInfo.GetQuantizationScale(),
+        outputTensorInfo.GetQuantizationOffset())));
 
     return DepthwiseConvolution2dTestImpl<ArmnnType, ArmnnBType>(
         workloadFactory,
@@ -2284,17 +2326,18 @@
     outputTensorInfo.SetQuantizationOffset(qOffset);
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-                                  std::vector<T>(QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                    inputTensorInfo.GetQuantizationOffset(),
-                                                                    inputNoQuantizedValues)));
+                                  std::vector<T>(QuantizedVector<T>(inputNoQuantizedValues,
+                                                                    inputTensorInfo.GetQuantizationScale(),
+                                                                    inputTensorInfo.GetQuantizationOffset())));
     auto kernel = MakeTensor<T, 4>(kernelTensorInfo,
-                                   std::vector<T>(QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(),
-                                                                     kernelTensorInfo.GetQuantizationOffset(),
-                                                                     kernelNoQuantizedValues)));
-    auto expectedOutput = MakeTensor<T, 4>(outputTensorInfo,
-                                           std::vector<T>(QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                             outputTensorInfo.GetQuantizationOffset(),
-                                                                             outputExpectedNoQuantizedValues)));
+                                   std::vector<T>(QuantizedVector<T>(kernelNoQuantizedValues,
+                                                                     kernelTensorInfo.GetQuantizationScale(),
+                                                                     kernelTensorInfo.GetQuantizationOffset())));
+    auto expectedOutput =
+        MakeTensor<T, 4>(outputTensorInfo,
+                         std::vector<T>(QuantizedVector<T>(outputExpectedNoQuantizedValues,
+                                                           outputTensorInfo.GetQuantizationScale(),
+                                                           outputTensorInfo.GetQuantizationOffset())));
 
     uint32_t padLeft = 0;
     uint32_t padTop = 0;
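The hunks above all apply the same mechanical rewrite: QuantizedVector now takes the float data first, with the quantization scale and offset trailing rather than leading. Below is a minimal sketch of the new call shape implied by these call sites; QuantizeForTest and its parameter names are illustrative only, not identifiers from the patch, and some files (including this one) call the helper unqualified when the namespace is brought in elsewhere.

    #include <cstdint>
    #include <vector>

    #include <QuantizeHelper.hpp>

    // Old shape, as removed above:  QuantizedVector<T>(qScale, qOffset, floatData)
    // New shape, as added above:    QuantizedVector<T>(floatData, qScale, qOffset)
    std::vector<uint8_t> QuantizeForTest(const std::vector<float>& floatData, float qScale, int32_t qOffset)
    {
        return armnnUtils::QuantizedVector<uint8_t>(floatData, qScale, qOffset);
    }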
diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
index 1997c4b..023bbae 100644
--- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
@@ -5,6 +5,7 @@
 
 #include "DebugTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -40,11 +41,11 @@
     }
 
     boost::multi_array<T, Dim> input =
-        MakeTensor<T, Dim>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+        MakeTensor<T, Dim>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     LayerTestResult<T, Dim> ret(outputTensorInfo);
     ret.outputExpected =
-        MakeTensor<T, Dim>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+        MakeTensor<T, Dim>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle =
         workloadFactory.CreateTensorHandle(inputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
index e21a4b6..4e8c938 100644
--- a/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DepthToSpaceTestImpl.cpp
@@ -5,7 +5,7 @@
 
 #include "DepthToSpaceTestImpl.hpp"
 
-#include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 
 #include <armnn/ArmNN.hpp>
 
@@ -44,10 +44,12 @@
         outputInfo.SetQuantizationOffset(qOffset);
     }
 
-    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    boost::multi_array<T, 4> input =
+        MakeTensor<T, 4>(inputInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     LayerTestResult<T, 4> result(outputInfo);
-    result.outputExpected = MakeTensor<T, 4>(outputInfo, QuantizedVector<T>(qScale, qOffset, expectedOutputData));
+    result.outputExpected =
+        MakeTensor<T, 4>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
index c84b941..cf101ee 100644
--- a/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/FullyConnectedTestImpl.cpp
@@ -7,6 +7,8 @@
 
 #include <armnn/ArmNN.hpp>
 
+#include <QuantizeHelper.hpp>
+
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <backendsCommon/test/DataTypeUtils.hpp>
@@ -191,15 +193,17 @@
     LayerTestResult<T, 2> result(outputTensorInfo);
 
     boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
-        })
+        },
+        qScale, qOffset)
     );
 
     boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             2.0f, 3.0f, 4.0f, 5.0f, 6.0f
-        })
+        },
+        qScale, qOffset)
     );
 
     std::vector<T> biasValues({900000.f});
@@ -215,10 +219,7 @@
     );
 
     result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
-            965432.0f,
-        })
-    );
+                                             armnnUtils::QuantizedVector<T>({ 965432.0f }, qScale, qOffset));
 
     return result;
 }
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index 4e9cbbf..d25fcea 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -5,6 +5,7 @@
 
 #include "InstanceNormalizationTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -14,7 +15,6 @@
 #include <backendsCommon/WorkloadFactory.hpp>
 
 #include <backendsCommon/test/DataLayoutUtils.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -35,12 +35,12 @@
     float qScale = 0.0f,
     int32_t qOffset = 0)
 {
-    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputValues));
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
+                                        armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
-
-    result.outputExpected =
-        MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
index 5c75b6f..569f5af 100644
--- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
@@ -6,6 +6,7 @@
 #include "L2NormalizationTestImpl.hpp"
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 #include <TensorUtils.hpp>
 
@@ -44,10 +45,10 @@
         inputData = tmp;
     }
 
-    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(
-                                                         inputTensorInfo.GetQuantizationScale(),
-                                                         inputTensorInfo.GetQuantizationOffset(),
-                                                         inputData));
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
+                                        armnnUtils::QuantizedVector<T>(inputData,
+                                                                       inputTensorInfo.GetQuantizationScale(),
+                                                                       inputTensorInfo.GetQuantizationOffset()));
 
     std::vector<float> expectedOutputData = expectedOutputValues;
     if (layout == armnn::DataLayout::NHWC)
@@ -59,10 +60,11 @@
     }
 
     LayerTestResult<T, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(
-                                                               outputTensorInfo.GetQuantizationScale(),
-                                                               outputTensorInfo.GetQuantizationOffset(),
-                                                               expectedOutputData));
+    result.outputExpected =
+        MakeTensor<T, 4>(outputTensorInfo,
+                         armnnUtils::QuantizedVector<T>(expectedOutputData,
+                                                        outputTensorInfo.GetQuantizationScale(),
+                                                        outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -693,16 +695,10 @@
     const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
     const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32, 0.f, 0);
 
-    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, QuantizedVector<float>(
-                                                             inputTensorInfo.GetQuantizationScale(),
-                                                             inputTensorInfo.GetQuantizationOffset(),
-                                                             inputData));
+    auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);
 
     LayerTestResult<float, 2> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, QuantizedVector<float>(
-                                                                   outputTensorInfo.GetQuantizationScale(),
-                                                                   outputTensorInfo.GetQuantizationOffset(),
-                                                                   expectedOutputData));
+    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index 0b73d37..4c340c8 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -6,6 +6,7 @@
 #include "LogSoftmaxTestImpl.hpp"
 
 #include <Half.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -14,7 +15,6 @@
 #include <backendsCommon/IBackendInternal.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -39,7 +39,7 @@
 {
     LayerTestResult<T, NumDims> result(outputInfo);
     result.outputExpected =
-        MakeTensor<T, NumDims>(outputInfo, QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
+        MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
@@ -54,7 +54,7 @@
     inputHandle->Allocate();
     outputHandle->Allocate();
 
-    auto inputTensor = MakeTensor<T, NumDims>(inputInfo, QuantizedVector<T>(qScale, qOffset, inputValues));
+    auto inputTensor = MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputValues, qScale, qOffset));
     CopyDataToITensorHandle(inputHandle.get(), inputTensor.origin());
 
     workload->Execute();
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index c07f623..6cea777 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -5,11 +5,12 @@
 
 #include "LstmTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
+
 #include <armnn/ArmNN.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -1963,13 +1964,19 @@
     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
 
     armnn::TensorInfo inputDesc({2, 2}, datatype);
-    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
-            std::vector<float>{2., 3., 3., 4.}));
+    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(
+        inputDesc,
+        armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
 
     armnn::TensorInfo outputDesc({2, 4}, datatype);
-    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
-            qOffset, std::vector<float>({{-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
-                                          -0.0185422f,  0.11281417f,  0.24466537f, -0.1826292f}})));
+    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(
+        outputDesc,
+        armnnUtils::QuantizedVector<int16_t>(
+            {
+                -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
+                -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
+            },
+            qScale, qOffset));
 
     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
@@ -1987,14 +1994,21 @@
     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
 
     armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
-    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
-            std::vector<float>({ 2., 3., 3., 4. })));
+    boost::multi_array<int16_t, 2> input =
+        MakeTensor<int16_t, 2>(
+            inputDesc,
+            armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
 
     armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
-    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
-            qOffset, std::vector<float>(
-            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
-             -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
+    boost::multi_array<int16_t, 2> expectedOutput =
+        MakeTensor<int16_t, 2>(
+            outputDesc,
+            armnnUtils::QuantizedVector<int16_t>(
+                {
+                    -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
+                    -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f
+                },
+                qScale, qOffset));
 
     return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
@@ -2011,20 +2025,32 @@
     const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
 
     armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
-    boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
-            qOffset, std::vector<float>(
-            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
-             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));
+    boost::multi_array<int16_t, 2> input =
+        MakeTensor<int16_t, 2>(
+            inputDesc,
+            armnnUtils::QuantizedVector<int16_t>(
+                {
+                    0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
+                    0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f
+                },
+                qScale, qOffset));
 
     armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
-    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
-            qOffset, std::vector<float>(
-            {-0.00396806f,  0.029352f,   -0.00279226f, 0.0159977f,  -0.00835576f,
-             -0.0211779f,   0.0283512f,  -0.0114597f,  0.00907307f, -0.0244004f,
-             -0.0152191f,  -0.0259063f,   0.00914318f, 0.00415118f,  0.017147f,
-              0.0134203f,  -0.013869f,    0.0287268f, -0.00334693f,  0.00733398f, -0.0287926f,
-             -0.0186926f,   0.0193662f,  -0.0115437f,  0.00422612f, -0.0345232f,
-              0.00223253f, -0.00957321f,  0.0210624f,  0.013331f,    0.0150954f,   0.02168f})));
+    boost::multi_array<int16_t, 2> expectedOutput =
+        MakeTensor<int16_t, 2>(
+            outputDesc,
+            armnnUtils::QuantizedVector<int16_t>(
+                {
+                    -0.00396806f,  0.02935200f, -0.00279226f,  0.01599770f,
+                    -0.00835576f, -0.02117790f,  0.02835120f, -0.01145970f,
+                     0.00907307f, -0.02440040f, -0.01521910f, -0.02590630f,
+                     0.00914318f,  0.00415118f,  0.01714700f,  0.01342030f,
+                    -0.01386900f,  0.02872680f, -0.00334693f,  0.00733398f,
+                    -0.02879260f, -0.01869260f,  0.01936620f, -0.01154370f,
+                     0.00422612f, -0.03452320f,  0.00223253f, -0.00957321f,
+                     0.02106240f,  0.01333100f,  0.01509540f,  0.02168000f
+                },
+                qScale, qOffset));
 
     return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
@@ -2040,13 +2066,20 @@
     const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
 
     armnn::TensorInfo inputDesc({2, 2}, datatype);
-    boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
-            qOffset, std::vector<float>{2., 3., 3., 4.}));
+    boost::multi_array<int16_t, 2> input =
+        MakeTensor<int16_t, 2>(inputDesc,
+                               armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
 
     armnn::TensorInfo outputDesc({2, 4}, datatype);
-    boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
-            qOffset, std::vector<float>({{-0.02973187f, 0.1229473f,   0.20885126f, -0.15358765f,
-                                          -0.0185422f,  0.11281417f,  0.24466537f, -0.1826292f}})));
+    boost::multi_array<int16_t, 2> expectedOutput =
+        MakeTensor<int16_t, 2>(
+            outputDesc,
+            armnnUtils::QuantizedVector<int16_t>(
+                {
+                    -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
+                    -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
+                },
+                qScale, qOffset));
 
     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
         workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
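The LSTM cases above feed float reference values through QuantizedVector<int16_t> to build QSymm16 tensors. As a rough mental model of the per-element conversion, the sketch below rounds, scales and offsets each value; it is an approximation for illustration only, and the real helper in armnnUtils may differ in its rounding, clamping and pass-through behaviour for non-quantized types.

    #include <cmath>
    #include <cstdint>
    #include <vector>

    // Illustrative stand-in, not the armnnUtils implementation.
    template <typename T>
    std::vector<T> QuantizedVectorSketch(const std::vector<float>& values, float qScale, int32_t qOffset)
    {
        const float scale = (qScale == 0.0f) ? 1.0f : qScale; // guard the test paths that pass a scale of 0
        std::vector<T> quantized;
        quantized.reserve(values.size());
        for (float value : values)
        {
            quantized.push_back(static_cast<T>(std::lround(value / scale) + qOffset));
        }
        return quantized;
    }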
diff --git a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
index 82b772e..0f9a30e 100644
--- a/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/PadTestImpl.cpp
@@ -5,6 +5,8 @@
 
 #include "PadTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
+
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -28,28 +30,27 @@
     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
 
-    std::vector<T> inputValues(
-    QuantizedVector<T>(qScale, qOffset,
-    {
-      // Height (3) x Width (3)
-      4, 8, 6,
-      7, 4, 4,
-      3, 2, 4
-    }));
+    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+        {
+            // Height (3) x Width (3)
+            4, 8, 6,
+            7, 4, 4,
+            3, 2, 4
+        },
+        qScale, qOffset);
 
     auto p = customPaddingValue;
-    std::vector<T> expectedOutputValues;
-    expectedOutputValues = (
-    QuantizedVector<T>(qScale, qOffset,
-    {
-      p, p, p, p, p, p, p,
-      p, p, p, p, p, p, p,
-      p, p, 4, 8, 6, p, p,
-      p, p, 7, 4, 4, p, p,
-      p, p, 3, 2, 4, p, p,
-      p, p, p, p, p, p, p,
-      p, p, p, p, p, p, p
-    }));
+    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+        {
+            p, p, p, p, p, p, p,
+            p, p, p, p, p, p, p,
+            p, p, 4, 8, 6, p, p,
+            p, p, 7, 4, 4, p, p,
+            p, p, 3, 2, 4, p, p,
+            p, p, p, p, p, p, p,
+            p, p, p, p, p, p, p
+        },
+        qScale, qOffset);
 
     auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
 
@@ -100,41 +101,39 @@
     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
 
-    std::vector<T> inputValues(
-      QuantizedVector<T>(qScale,qOffset,
-    {
-        // Channel 0, Height (2) x Width (2)
-        0, 4,
-        2, 5,
+    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+        {
+            // Channel 0, Height (2) x Width (2)
+            0, 4,
+            2, 5,
 
-        // Channel 1, Height (2) x Width (2)
-        6, 1,
-        5, 2
-    }));
+            // Channel 1, Height (2) x Width (2)
+            6, 1,
+            5, 2
+        },
+        qScale, qOffset);
 
-    std::vector<T> expectedOutputValues(
-      QuantizedVector<T>(qScale,qOffset,
-    {
+    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+        {
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 4, 0, 0,
+            0, 0, 2, 5, 0, 0,
+            0, 0, 0, 0, 0, 0,
 
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 4, 0, 0,
-        0, 0, 2, 5, 0, 0,
-        0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0,
+            0, 0, 6, 1, 0, 0,
+            0, 0, 5, 2, 0, 0,
+            0, 0, 0, 0, 0, 0,
 
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 6, 1, 0, 0,
-        0, 0, 5, 2, 0, 0,
-        0, 0, 0, 0, 0, 0,
-
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0,
-        0, 0, 0, 0, 0, 0
-
-    }));
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0,
+            0, 0, 0, 0, 0, 0
+        },
+        qScale, qOffset);
 
     auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
 
@@ -185,193 +184,193 @@
     const armnn::TensorInfo inputTensorInfo(inputShape, ArmnnType, qScale, qOffset);
     const armnn::TensorInfo outputTensorInfo(outputShape, ArmnnType, qScale, qOffset);
 
-    std::vector<T> inputValues(
-      QuantizedVector<T>(qScale,qOffset,
-    {
-        // Batch 0, Channel 0, Height (3) x Width (2)
-        0, 1,
-        2, 3,
-        4, 5,
+    std::vector<T> inputValues = armnnUtils::QuantizedVector<T>(
+        {
+            // Batch 0, Channel 0, Height (3) x Width (2)
+             0,  1,
+             2,  3,
+             4,  5,
 
-        // Batch 0, Channel 1, Height (3) x Width (2)
-        6, 7,
-        8, 9,
-        10, 11,
+            // Batch 0, Channel 1, Height (3) x Width (2)
+             6,  7,
+             8,  9,
+            10, 11,
 
-        // Batch 1, Channel 0, Height (3) x Width (2)
-        12, 13,
-        14, 15,
-        16, 17,
+            // Batch 1, Channel 0, Height (3) x Width (2)
+            12, 13,
+            14, 15,
+            16, 17,
 
-        // Batch 1, Channel 1, Height (3) x Width (2)
-        18, 19,
-        20, 21,
-        22, 23
-    }));
+            // Batch 1, Channel 1, Height (3) x Width (2)
+            18, 19,
+            20, 21,
+            22, 23
+        },
+        qScale, qOffset);
 
-    std::vector<T> expectedOutputValues(
-      QuantizedVector<T>(qScale,qOffset,
-    {
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+    std::vector<T> expectedOutputValues = armnnUtils::QuantizedVector<T>(
+        {
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 1, 0,
-        0, 2, 3, 0,
-        0, 4, 5, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 1, 0,
+            0, 2, 3, 0,
+            0, 4, 5, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 6, 7, 0,
-        0, 8, 9, 0,
-        0, 10, 11, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 6, 7, 0,
+            0, 8, 9, 0,
+            0, 10, 11, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 12, 13, 0,
-        0, 14, 15, 0,
-        0, 16, 17, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 12, 13, 0,
+            0, 14, 15, 0,
+            0, 16, 17, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 18, 19, 0,
-        0, 20, 21, 0,
-        0, 22, 23, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 18, 19, 0,
+            0, 20, 21, 0,
+            0, 22, 23, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
 
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0,
-        0, 0, 0, 0
-    }));
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0,
+            0, 0, 0, 0
+        },
+        qScale, qOffset);
 
     auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
 
diff --git a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
index ef48c97..fe0d076 100644
--- a/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PermuteTestImpl.hpp
@@ -12,7 +12,6 @@
 #include <backendsCommon/IBackendInternal.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
 #include <test/TensorHelpers.hpp>
diff --git a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
index f250fa5..fcc8980 100644
--- a/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Pooling2dTestImpl.cpp
@@ -9,12 +9,12 @@
 
 #include <DataLayoutIndexed.hpp>
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 #include <TensorUtils.hpp>
 
 #include <backendsCommon/WorkloadInfo.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -25,6 +25,8 @@
 namespace
 {
 
+using namespace armnnUtils;
+
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> SimplePooling2dTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
@@ -187,7 +189,7 @@
     inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
     std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
 
     // These were calculated manually.
     auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
@@ -195,7 +197,7 @@
     if (forceNoPadding)
     {
         outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-            QuantizedVector<T>(qScale, qOffset, {
+            QuantizedVector<T>({
                  8.0f,  8.0f,  8.0f,
                  9.0f,  7.0f,  9.0f,
                  9.0f,  9.0f,  9.0f,
@@ -211,12 +213,13 @@
                  0.0f,  0.0f, -3.0f,
                 -1.0f,  0.0f,  0.0f,
                 -1.0f, -1.0f, -1.0f
-        }));
+            },
+            qScale, qOffset));
     }
     else
     {
         outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-            QuantizedVector<T>(qScale, qOffset, {
+            QuantizedVector<T>({
                 0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
                 0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
                 0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
@@ -232,7 +235,8 @@
                 0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
                 0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                 0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f
-        }));
+            },
+            qScale, qOffset));
     }
 
     return SimplePooling2dTestImpl<ArmnnType>(
@@ -267,7 +271,7 @@
     }
 
     std::vector<T> inputData(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
              1.0f,  2.0f,  5.0f,  6.0f,
              3.0f,  4.0f,  7.0f,  8.0f,
              9.0f, 10.0f, 13.0f, 14.0f,
@@ -277,16 +281,18 @@
             19.0f, 20.0f, 23.0f, 24.0f,
             25.0f, 26.0f, 29.0f, 30.0f,
             27.0f, 28.0f, 31.0f, 32.0f,
-        }));
+        },
+        qScale, qOffset));
 
     std::vector<T> outputData(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
              4.0f,  8.0f,
             12.0f, 16.0f,
 
             20.0f, 24.0f,
             28.0f, 32.0f,
-        }));
+        },
+        qScale, qOffset));
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
     if (dataLayout == armnn::DataLayout::NHWC)
@@ -336,7 +342,7 @@
     }
 
     std::vector<T> inputData(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
              2.0f,  2.0f,  6.0f,  6.0f,
              4.0f,  4.0f,  8.0f,  8.0f,
             10.0f, 12.0f, 14.0f, 16.0f,
@@ -346,16 +352,18 @@
             20.0f, 18.0f, 22.0f, 24.0f,
             26.0f, 28.0f,  0.0f,  0.0f,
             26.0f, 28.0f,  0.0f,  0.0f,
-        }));
+        },
+        qScale, qOffset));
 
     std::vector<T> outputData(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
              3.0f,  7.0f,
             11.0f, 15.0f,
 
             19.0f, 23.0f,
             27.0f,  0.0f,
-        }));
+        },
+        qScale, qOffset));
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
     if (dataLayout == armnn::DataLayout::NHWC)
@@ -447,7 +455,7 @@
     armnn::TensorInfo outputTensorInfo = armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
 
     std::vector<T> inputData(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             1.0f, 7.0f, 5.0f, 5.0f,
             1.0f, 7.0f, 5.0f, 5.0f,
             3.0f, 3.0f, 1.0f, 1.0f,
@@ -457,16 +465,18 @@
             1.0f, 7.0f, 2.0f, 0.0f,
             0.0f, 2.0f, 1.0f, 1.0f,
             0.0f, 0.0f, 1.0f, 1.0f,
-        }));
+        },
+        qScale, qOffset));
 
     std::vector<T> outputData(
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             5.0f, 5.0f,
             3.0f, 1.0f,
 
             5.0f, 1.0f,
             1.0f, 1.0f,
-        }));
+        },
+        qScale, qOffset));
 
     const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
     if (dataLayout == armnn::DataLayout::NHWC)
@@ -503,19 +513,21 @@
 
     armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             2.0f, 1.0f, 5.0f, 2.0f,
             1.0f, 2.0f, 2.0f, 1.0f,
             5.0f, 4.0f, 1.0f, 5.0f,
             2.0f, 1.0f, 5.0f, 2.0f,
-        }));
+        },
+        qScale, qOffset));
 
     armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             3.0f, 3.0f,
             3.0f, 3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -536,7 +548,7 @@
 
     armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
@@ -546,15 +558,17 @@
             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
-        }));
+        },
+        qScale, qOffset));
 
     armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, ArmnnType);
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             3.0f, 3.0f, 3.0f,
             3.0f, 3.0f, 3.0f,
             3.0f, 3.0f, 3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -575,7 +589,7 @@
 
     armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
             1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
             5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
@@ -583,14 +597,16 @@
             2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
             1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
             5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
-        }));
+        },
+        qScale, qOffset));
 
     armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, ArmnnType);
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             3.0f, 3.0f,
             3.0f, 3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -611,7 +627,7 @@
 
     armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             1.0f, 0.0f, 2.0f, 0.0f,  3.0f, 0.0f, 4.0f,
             0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
             0.0f, 5.0f, 0.0f, 6.0f,  0.0f, 7.0f, 0.0f,
@@ -619,13 +635,15 @@
             0.0f, 5.0f, 0.0f, 2.0f,  0.0f, 1.0f, 1.0f,
             0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
             0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
-        }));
+        },
+        qScale, qOffset));
 
     armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -646,7 +664,7 @@
 
     armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, ArmnnType);
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
@@ -656,13 +674,15 @@
             2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
             1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
             5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
-        }));
+        },
+        qScale, qOffset));
 
     armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, ArmnnType);
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -693,15 +713,17 @@
 
     // Construct input data.
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             1.0f, 3.0f, 4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     // These were calculated manually.
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             0.0f, 3.0f, 0.0f, 3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -883,11 +905,11 @@
         outputTensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
-                         QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
+        forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
+                         QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -961,11 +983,11 @@
         outputTensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputData, qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
-                         QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
+        forceNoPadding ? QuantizedVector<T>(expectedOutputDataNoPadding, qScale, qOffset) :
+                         QuantizedVector<T>(expectedOutputDataWithPadding, qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1002,19 +1024,21 @@
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -1.0f, -2.0f,  3.0f,  4.0f,
             -1.0f, -2.0f,  3.0f,  4.0f,
              1.0f,  2.0f, -3.0f, -4.0f,
              1.0f,  2.0f, -3.0f, -4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -1.0f,  3.0f,  4.0f,
              1.0f,  3.0f,  4.0f,
              1.0f,  2.0f, -4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1050,20 +1074,22 @@
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -1.0f, -2.0f,  3.0f,  4.0f,
             -1.0f, -2.0f,  3.0f,  4.0f,
              1.0f,  2.0f, -3.0f, -4.0f,
              1.0f,  2.0f, -3.0f, -4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             -1.0f,  3.0f,  4.0f,  4.0f,
              2.0f,  3.0f,  4.0f,  4.0f,
              2.0f,  3.0f,  4.0f,  4.0f,
              2.0f,  2.0f,  2.0f, -3.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1099,19 +1125,21 @@
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             12.0f, 20.0f, 32.0f, 40.0f,
             12.0f, 20.0f, 32.0f, 40.0f,
             12.0f, 20.0f, 32.0f, 40.0f,
             12.0f, 20.0f, 32.0f, 40.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             3.0f,  13.0f,  10.0f,
             6.0f,  26.0f,  20.0f,
             3.0f,  13.0f,  10.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1148,18 +1176,20 @@
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             1.0f, 2.0f, 3.0f, 4.0f,
             1.0f, 2.0f, 3.0f, 4.0f,
             1.0f, 2.0f, 3.0f, 4.0f,
             1.0f, 2.0f, 3.0f, 4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             2.0f, 3.5f,
             2.0f, 3.5f
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1195,20 +1225,22 @@
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             9.0f,   27.0f,  18.0f,  36.0f,
             18.0f,   9.0f,  18.0f,   9.0f,
             27.0f,  18.0f,   9.0f,  27.0f,
             9.0f,   27.0f,   9.0f,  18.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
              7.0f,  11.0f,  13.0f, 9.0f,
             12.0f,  17.0f,  19.0f, 13.0f,
             12.0f,  16.0f,  16.0f, 10.0f,
              9.0f,  11.0f,  12.0f, 7.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1244,19 +1276,21 @@
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             2.0f,  4.0f, 8.0f, 16.0f,
             4.0f,  2.0f, 2.0f, 4.0f,
             8.0f,  2.0f, 4.0f, 2.0f,
             16.0f, 2.0f, 2.0f, 8.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
                1.0f,     4.4721f,   8.0f,
             4.4721f,     2.6457f,   2.236f,
                8.0f,     1.4142f,   4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
@@ -1292,20 +1326,22 @@
     }
 
     auto input = MakeTensor<T, 4>(inputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             1.0f, 2.0f, 3.0f, 4.0f,
             1.0f, 2.0f, 3.0f, 4.0f,
             1.0f, 2.0f, 3.0f, 4.0f,
             1.0f, 2.0f, 3.0f, 4.0f,
-        }));
+        },
+        qScale, qOffset));
 
     auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-        QuantizedVector<T>(qScale, qOffset, {
+        QuantizedVector<T>({
             1.0540f, 1.7638f, 2.5385f, 2.3570f,
             1.2909f, 2.1602f, 3.1091f, 2.8867f,
             1.2909f, 2.1602f, 3.1091f, 2.8867f,
             1.0540f, 1.7638f, 2.5385f, 2.3570f,
-        }));
+        },
+        qScale, qOffset));
 
     return SimplePooling2dTestImpl<ArmnnType>(
         workloadFactory, memoryManager, descriptor, qScale, qOffset, input, outputExpected);
diff --git a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
index 18a5bd0..dc9b908 100644
--- a/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/PreluTestImpl.hpp
@@ -7,6 +7,7 @@
 
 #include "LayerTestResult.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -57,18 +58,22 @@
        0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, -1.0f, -2.0f, 0.0f, -2.0f, -4.0f
     };
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
-    auto alpha = MakeTensor<T, 4>(alphaTensorInfo, QuantizedVector<T>(alphaTensorInfo.GetQuantizationScale(),
-                                                                      alphaTensorInfo.GetQuantizationOffset(),
-                                                                      alphaData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
+
+    auto alpha = MakeTensor<T, 4>(alphaTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(alphaData,
+                                                                 alphaTensorInfo.GetQuantizationScale(),
+                                                                 alphaTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputExpectedData));
+    result.outputExpected =
+        MakeTensor<T, 4>(outputTensorInfo,
+                         armnnUtils::QuantizedVector<T>(outputExpectedData,
+                                                        outputTensorInfo.GetQuantizationScale(),
+                                                        outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> alphaHandle  = workloadFactory.CreateTensorHandle(alphaTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp
index bb2392f..56ce51a 100644
--- a/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ResizeTestImpl.hpp
@@ -8,6 +8,7 @@
 #include "LayerTestResult.hpp"
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 #include <TensorUtils.hpp>
 
@@ -76,9 +77,10 @@
         inputData = tmp;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = input;
@@ -174,15 +176,16 @@
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -278,15 +281,16 @@
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -377,15 +381,16 @@
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -484,15 +489,16 @@
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -573,9 +579,10 @@
         inputData = tmp;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = input;
@@ -670,15 +677,16 @@
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -773,15 +781,16 @@
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -871,15 +880,16 @@
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -978,15 +988,16 @@
         outputData = tmp1;
     }
 
-    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
-                                                                      inputTensorInfo.GetQuantizationOffset(),
-                                                                      inputData));
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+                                  armnnUtils::QuantizedVector<T>(inputData,
+                                                                 inputTensorInfo.GetQuantizationScale(),
+                                                                 inputTensorInfo.GetQuantizationOffset()));
 
     LayerTestResult<T, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
-                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
-                                                                outputTensorInfo.GetQuantizationOffset(),
-                                                                outputData));
+                                             armnnUtils::QuantizedVector<T>(outputData,
+                                                                            outputTensorInfo.GetQuantizationScale(),
+                                                                            outputTensorInfo.GetQuantizationOffset()));
 
     std::unique_ptr <armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr <armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
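(For orientation only: in the Prelu and Resize hunks above, the scale and offset are not free-standing locals but are read back from the tensor's TensorInfo. A hedged sketch of that pattern in isolation; the helper name QuantizeForInfo and the uint8_t instantiation are illustrative and not part of this patch.)

    #include <QuantizeHelper.hpp>

    #include <armnn/ArmNN.hpp>

    #include <cstdint>
    #include <vector>

    // Quantize float data using whatever scale/offset the given TensorInfo carries.
    std::vector<uint8_t> QuantizeForInfo(const armnn::TensorInfo& info,
                                         const std::vector<float>& data)
    {
        return armnnUtils::QuantizedVector<uint8_t>(data,
                                                    info.GetQuantizationScale(),
                                                    info.GetQuantizationOffset());
    }
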
diff --git a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
index f0479c8..a60b189 100644
--- a/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SliceTestImpl.cpp
@@ -5,6 +5,7 @@
 
 #include "SliceTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -39,11 +40,11 @@
     }
 
     boost::multi_array<T, NumDims> input =
-        MakeTensor<T, NumDims>(inputInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+        MakeTensor<T, NumDims>(inputInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     LayerTestResult<T, NumDims> result(outputInfo);
     result.outputExpected =
-        MakeTensor<T, NumDims>(outputInfo, QuantizedVector<T>(qScale, qOffset, expectedOutputData));
+        MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index c0b62aa..a5f6477 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -5,13 +5,13 @@
 
 #include "SoftmaxTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -85,8 +85,7 @@
     LayerTestResult<T, n> ret(outputTensorInfo);
 
     // Each row is independently softmax'd.
-    auto input = MakeTensor<T, n>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, inputData)));
+    auto input = MakeTensor<T, n>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
@@ -111,8 +110,7 @@
 
     CopyDataFromITensorHandle(ret.output.origin(), outputHandle.get());
 
-    std::vector<T> expectedOutput = std::vector<T>(
-            QuantizedVector<T>(qScale, qOffset, outputData));
+    std::vector<T> expectedOutput = armnnUtils::QuantizedVector<T>(outputData, qScale, qOffset);
     ret.outputExpected = MakeTensor<T, n>(outputTensorInfo, expectedOutput);
 
     return ret;
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
index 094ed23..f815604 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToBatchNdTestImpl.cpp
@@ -6,6 +6,7 @@
 #include "SpaceToBatchNdTestImpl.hpp"
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -55,10 +56,12 @@
         outputTensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
+                                                      armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     LayerTestResult<T, 4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                          armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
index 48e157d..0541323 100644
--- a/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SpaceToDepthTestImpl.cpp
@@ -6,6 +6,7 @@
 #include "SpaceToDepthTestImpl.hpp"
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -56,10 +57,12 @@
         outputTensorInfo.SetQuantizationOffset(qOffset);
     }
 
-    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
+                                                      armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     LayerTestResult<T, 4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                          armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
index 1716091..7aebdd0 100644
--- a/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SplitterTestImpl.cpp
@@ -5,11 +5,11 @@
 
 #include "SplitterTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
 
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -80,7 +80,7 @@
     LayerTestResult<T,3> ret4(outputTensorInfo4);
 
     auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
             6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
             11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
@@ -101,24 +101,26 @@
             76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
             81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
             86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
-        })
+        },
+        qScale, qOffset)
     ));
 
     // Channel 0 of the original input.
     ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
             6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
             11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
             16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
             21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
             26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
-        })
+        },
+        qScale, qOffset)
     ));
 
     // Channel 1 & 2 of the original input.
     ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
             36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
             41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
@@ -132,31 +134,34 @@
             76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
             81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
             86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
-        })
+        },
+        qScale, qOffset)
     ));
 
     // Channel 0 of return 2 (i.e. channels 1 and 2 of the original input).
     ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
             36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
             41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
             46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
             51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
             56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
-        })
+        },
+        qScale, qOffset)
     ));
 
     // Channel 1 of return 2.
     ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
-        QuantizedVector<T>(qScale, qOffset, {
+        armnnUtils::QuantizedVector<T>({
             61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
             66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
             71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
             76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
             81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
             86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
-        })
+        },
+        qScale, qOffset)
     ));
 
     // NOTE: as a corollary of the splitting of x and y restriction the x and y values of the view origins
@@ -253,29 +258,31 @@
     float qScale, int32_t qOffset)
 {
     const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, ArmnnType, qScale, qOffset);
-    auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset,
-                                                                 {
-                                                                     1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
-                                                                     6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
-                                                                     11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
-                                                                     16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
-                                                                     21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
-                                                                     26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
+    auto input = MakeTensor<T, 3>(
+        tensorInfo,
+        armnnUtils::QuantizedVector<T>({
+             1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
+             6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
+            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
+            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
+            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
 
-                                                                     31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
-                                                                     36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
-                                                                     41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
-                                                                     46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
-                                                                     51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
-                                                                     56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
+            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
+            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
+            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
+            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
+            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
+            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
 
-                                                                     61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
-                                                                     66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
-                                                                     71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
-                                                                     76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
-                                                                     81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
-                                                                     86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
-                                                                 }));
+            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
+            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
+            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
+            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
+            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
+            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
+        },
+        qScale, qOffset));
 
     std::vector<unsigned int> origin = { 0, 0, 0 };
     armnn::SplitterQueueDescriptor::ViewOrigin window(origin);
diff --git a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
index b32e622..515b5a0 100644
--- a/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/StridedSliceTestImpl.cpp
@@ -5,6 +5,7 @@
 
 #include "StridedSliceTestImpl.hpp"
 
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/ArmNN.hpp>
@@ -39,11 +40,11 @@
     }
 
     boost::multi_array<T, InDim> input =
-        MakeTensor<T, InDim>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+        MakeTensor<T, InDim>(inputTensorInfo, armnnUtils::QuantizedVector<T>(inputData, qScale, qOffset));
 
     LayerTestResult<T, OutDim> ret(outputTensorInfo);
     ret.outputExpected =
-        MakeTensor<T, OutDim>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+        MakeTensor<T, OutDim>(outputTensorInfo, armnnUtils::QuantizedVector<T>(outputExpectedData, qScale, qOffset));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle =
         workloadFactory.CreateTensorHandle(inputTensorInfo);
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp
index 7391f9c..a2b477c 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.hpp
@@ -8,12 +8,12 @@
 #include <armnn/ArmNN.hpp>
 
 #include <Permute.hpp>
+#include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 
 #include <backendsCommon/test/DataLayoutUtils.hpp>
-#include <backendsCommon/test/QuantizeHelper.hpp>
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
@@ -146,14 +146,16 @@
     TensorData<T> input =
     {
         inputInfo,
-        QuantizedVector<T>(inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), inputData)
+        armnnUtils::QuantizedVector<T>(inputData, inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset())
     };
 
     // set up weights
     TensorData<T> weights =
     {
         weightsInfo,
-        QuantizedVector<T>(weightsInfo.GetQuantizationScale(), weightsInfo.GetQuantizationOffset(), weightsData)
+        armnnUtils::QuantizedVector<T>(weightsData,
+                                       weightsInfo.GetQuantizationScale(),
+                                       weightsInfo.GetQuantizationOffset())
     };
 
     // set up biases
@@ -164,7 +166,9 @@
         TensorData<BT> biases =
         {
             biasesInfo,
-            QuantizedVector<BT>(biasesInfo.GetQuantizationScale(), biasesInfo.GetQuantizationOffset(), biasesData)
+            armnnUtils::QuantizedVector<BT>(biasesData,
+                                            biasesInfo.GetQuantizationScale(),
+                                            biasesInfo.GetQuantizationOffset())
         };
 
         optionalBiases = Optional<TensorData<BT>>(biases);
@@ -186,9 +190,9 @@
     LayerTestResult<T, 4> testResult(outputInfo);
     testResult.output         = MakeTensor<T, 4>(outputInfo, output.second);
     testResult.outputExpected = MakeTensor<T, 4>(outputInfo,
-                                                 QuantizedVector<T>(outputInfo.GetQuantizationScale(),
-                                                                    outputInfo.GetQuantizationOffset(),
-                                                                    expectedOutputData));
+                                                 armnnUtils::QuantizedVector<T>(expectedOutputData,
+                                                                                outputInfo.GetQuantizationScale(),
+                                                                                outputInfo.GetQuantizationOffset()));
 
     return testResult;
 }