IVGCVSW-5256 Use CreateTensorHandle() function from TensorHandleFactory in the tests for layers Q, R & T

Signed-off-by: Finn Williams <Finn.Williams@arm.com>

Change-Id: I6fc613d31785298a0b7ed18f1abdd59bafed1e8e
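The pattern in every hunk below is the same: each test helper gains a
const armnn::ITensorHandleFactory& parameter and creates its tensor handles
through it instead of the deprecated IWorkloadFactory::CreateTensorHandle().
A condensed before/after sketch (identifiers taken from the diff; an
illustrative fragment, not a complete test):

    // Before: handles came from the workload factory, with deprecation
    // warnings suppressed around the calls.
    ARMNN_NO_DEPRECATE_WARN_BEGIN
    std::unique_ptr<armnn::ITensorHandle> inputHandle =
        workloadFactory.CreateTensorHandle(inputTensorInfo);
    ARMNN_NO_DEPRECATE_WARN_END

    // After: the handle factory passed into the test creates the handle,
    // so the suppression macros can be dropped.
    std::unique_ptr<armnn::ITensorHandle> inputHandle =
        tensorHandleFactory.CreateTensorHandle(inputTensorInfo);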
diff --git a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
index d166695..5a36856 100644
--- a/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/QuantizeTestImpl.cpp
@@ -23,6 +23,7 @@
 LayerTestResult<T, Dim> QuantizeTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::TensorInfo& inputTensorInfo,
     const armnn::TensorInfo& outputTensorInfo,
     const std::vector<float>& inputData,
@@ -35,10 +36,8 @@
     LayerTestResult<T, Dim> ret(outputTensorInfo);
     ret.outputExpected = MakeTensor<T, Dim>(outputTensorInfo, expectedOutputData);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
@@ -61,7 +60,8 @@
 template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
 LayerTestResult<T, 4> QuantizeSimpleTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::QuantizeQueueDescriptor desc;
 
@@ -86,6 +86,7 @@
 
     return QuantizeTestImpl<T, 4>(workloadFactory,
                                   memoryManager,
+                                  tensorHandleFactory,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   inputData,
@@ -96,7 +97,8 @@
 template <armnn::DataType ArmnnOutputType, typename T = armnn::ResolveType<ArmnnOutputType>>
 LayerTestResult<T, 4> QuantizeClampTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::QuantizeQueueDescriptor desc;
 
@@ -118,6 +120,7 @@
 
     return QuantizeTestImpl<T, 4>(workloadFactory,
                                   memoryManager,
+                                  tensorHandleFactory,
                                   inputTensorInfo,
                                   outputTensorInfo,
                                   inputData,
@@ -129,35 +132,40 @@
 
 LayerTestResult<uint8_t, 4> QuantizeSimpleUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    return QuantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
+    return QuantizeSimpleTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<uint8_t, 4> QuantizeClampUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager);
+    return QuantizeClampTest<armnn::DataType::QAsymmU8>(workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<int8_t, 4> QuantizeClampAsymmInt8Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    return QuantizeClampTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager);
+    return QuantizeClampTest<armnn::DataType::QAsymmS8>(workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<int8_t, 4> QuantizeClampInt8Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    return QuantizeClampTest<armnn::DataType::QSymmS8>(workloadFactory, memoryManager);
+    return QuantizeClampTest<armnn::DataType::QSymmS8>(workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<int16_t, 4> QuantizeClampInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    return QuantizeClampTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager);
+    return QuantizeClampTest<armnn::DataType::QSymmS16>(workloadFactory, memoryManager, tensorHandleFactory);
 }