IVGCVSW-5252 Use CreateTensorHandle() function from TensorHandleFactory in the tests for layers between G and L

Signed-off-by: Finn Williams <Finn.Williams@arm.com>
Change-Id: I197351a479fb211787bd12a73c9618d2ded95898
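
The pattern applied throughout this patch: each layer test gains a const armnn::ITensorHandleFactory&
parameter and creates its tensor handles through it, instead of calling the deprecated
IWorkloadFactory::CreateTensorHandle() overloads inside ARMNN_NO_DEPRECATE_WARN_BEGIN/END guards.
A minimal sketch of that handle-creation step follows; the helper name and include paths are
illustrative only and may differ between ArmNN versions, they are not part of this change.

    // Illustrative sketch, not part of this patch: a hypothetical helper that creates
    // input/output handles the way the migrated tests now do, i.e. through the
    // ITensorHandleFactory supplied by the test framework rather than through the
    // deprecated IWorkloadFactory::CreateTensorHandle() overloads.
    // The include paths below are assumptions and may need adjusting to the local source layout.
    #include <armnn/Tensor.hpp>
    #include <armnn/backends/ITensorHandleFactory.hpp>

    #include <memory>
    #include <utility>

    std::pair<std::unique_ptr<armnn::ITensorHandle>, std::unique_ptr<armnn::ITensorHandle>>
    CreateIoHandles(const armnn::ITensorHandleFactory& tensorHandleFactory,
                    const armnn::TensorInfo& inputInfo,
                    const armnn::TensorInfo& outputInfo)
    {
        // Both handles now come from the backend's tensor handle factory.
        auto inputHandle  = tensorHandleFactory.CreateTensorHandle(inputInfo);
        auto outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
        return { std::move(inputHandle), std::move(outputHandle) };
    }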
diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
index b57f2ef..7fabff6 100644
--- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.cpp
@@ -24,6 +24,7 @@
 LayerTestResult<T, OutputDim> GatherTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::TensorInfo& paramsInfo,
     const armnn::TensorInfo& indicesInfo,
     const armnn::TensorInfo& outputInfo,
@@ -38,11 +39,9 @@
     LayerTestResult<T, OutputDim> result(outputInfo);
     result.outputExpected = MakeTensor<T, OutputDim>(outputInfo, outputData);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> paramsHandle = workloadFactory.CreateTensorHandle(paramsInfo);
-    std::unique_ptr<armnn::ITensorHandle> indicesHandle = workloadFactory.CreateTensorHandle(indicesInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+    std::unique_ptr<armnn::ITensorHandle> paramsHandle = tensorHandleFactory.CreateTensorHandle(paramsInfo);
+    std::unique_ptr<armnn::ITensorHandle> indicesHandle = tensorHandleFactory.CreateTensorHandle(indicesInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
 
     armnn::GatherQueueDescriptor data;
     armnn::WorkloadInfo info;
@@ -71,7 +70,8 @@
 {
     static LayerTestResult<T, 1> Gather1dParamsTestImpl(
         armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
     {
         armnn::TensorInfo paramsInfo({ 8 }, ArmnnType);
         armnn::TensorInfo indicesInfo({ 4 }, armnn::DataType::Signed32);
@@ -91,6 +91,7 @@
         return GatherTestImpl<ArmnnType, T, 1, 1, 1>(
             workloadFactory,
             memoryManager,
+            tensorHandleFactory,
             paramsInfo,
             indicesInfo,
             outputInfo,
@@ -101,7 +102,8 @@
 
     static LayerTestResult<T, 2> GatherMultiDimParamsTestImpl(
         armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
     {
         armnn::TensorInfo paramsInfo({ 5, 2 }, ArmnnType);
         armnn::TensorInfo indicesInfo({ 3 }, armnn::DataType::Signed32);
@@ -122,6 +124,7 @@
         return GatherTestImpl<ArmnnType, T, 2, 1, 2>(
             workloadFactory,
             memoryManager,
+            tensorHandleFactory,
             paramsInfo,
             indicesInfo,
             outputInfo,
@@ -132,7 +135,8 @@
 
     static LayerTestResult<T, 4> GatherMultiDimParamsMultiDimIndicesTestImpl(
         armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
     {
         armnn::TensorInfo paramsInfo({ 3, 2, 3}, ArmnnType);
         armnn::TensorInfo indicesInfo({ 2, 3 }, armnn::DataType::Signed32);
@@ -180,6 +184,7 @@
         return GatherTestImpl<ArmnnType, T, 3, 2, 4>(
             workloadFactory,
             memoryManager,
+            tensorHandleFactory,
             paramsInfo,
             indicesInfo,
             outputInfo,
@@ -194,7 +199,8 @@
 {
     static LayerTestResult<T, 1> Gather1dParamsTestImpl(
         armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
     {
         using namespace half_float::literal;
 
@@ -209,6 +215,7 @@
         return GatherTestImpl<armnn::DataType::Float16, T, 1, 1, 1>(
             workloadFactory,
             memoryManager,
+            tensorHandleFactory,
             paramsInfo,
             indicesInfo,
             outputInfo,
@@ -219,7 +226,8 @@
 
     static LayerTestResult<T, 2> GatherMultiDimParamsTestImpl(
         armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
     {
         using namespace half_float::literal;
 
@@ -235,6 +243,7 @@
         return GatherTestImpl<armnn::DataType::Float16, T, 2, 1, 2>(
             workloadFactory,
             memoryManager,
+            tensorHandleFactory,
             paramsInfo,
             indicesInfo,
             outputInfo,
@@ -245,7 +254,8 @@
 
     static LayerTestResult<T, 4> GatherMultiDimParamsMultiDimIndicesTestImpl(
         armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory)
     {
         using namespace half_float::literal;
 
@@ -287,6 +297,7 @@
         return GatherTestImpl<armnn::DataType::Float16, T, 3, 2, 4>(
             workloadFactory,
             memoryManager,
+            tensorHandleFactory,
             paramsInfo,
             indicesInfo,
             outputInfo,
@@ -300,113 +311,135 @@
 
 LayerTestResult<float, 1> Gather1dParamsFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    return GatherTestHelper<armnn::DataType::Float32>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
+    return GatherTestHelper<armnn::DataType::Float32>::Gather1dParamsTestImpl(
+            workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<armnn::Half, 1> Gather1dParamsFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    return GatherTestHelper<armnn::DataType::Float16>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
+    return GatherTestHelper<armnn::DataType::Float16>::Gather1dParamsTestImpl(
+            workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<uint8_t, 1> Gather1dParamsUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    return GatherTestHelper<armnn::DataType::QAsymmU8>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
+    return GatherTestHelper<armnn::DataType::QAsymmU8>::Gather1dParamsTestImpl(
+            workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<int16_t, 1> Gather1dParamsInt16Test(
-        armnn::IWorkloadFactory& workloadFactory,
+    armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    return GatherTestHelper<armnn::DataType::QSymmS16>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
+    return GatherTestHelper<armnn::DataType::QSymmS16>::Gather1dParamsTestImpl(
+            workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<int32_t, 1> Gather1dParamsInt32Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    return GatherTestHelper<armnn::DataType::Signed32>::Gather1dParamsTestImpl(workloadFactory, memoryManager);
+    return GatherTestHelper<armnn::DataType::Signed32>::Gather1dParamsTestImpl(
+            workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<float, 2> GatherMultiDimParamsFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    return GatherTestHelper<armnn::DataType::Float32>::GatherMultiDimParamsTestImpl(workloadFactory, memoryManager);
+    return GatherTestHelper<armnn::DataType::Float32>::GatherMultiDimParamsTestImpl(
+            workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<armnn::Half, 2> GatherMultiDimParamsFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    return GatherTestHelper<armnn::DataType::Float16>::GatherMultiDimParamsTestImpl(workloadFactory, memoryManager);
+    return GatherTestHelper<armnn::DataType::Float16>::GatherMultiDimParamsTestImpl(
+            workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return GatherTestHelper<armnn::DataType::QAsymmU8>::GatherMultiDimParamsTestImpl(
-        workloadFactory, memoryManager);
+        workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return GatherTestHelper<armnn::DataType::QSymmS16>::GatherMultiDimParamsTestImpl(
-        workloadFactory, memoryManager);
+        workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<int32_t, 2> GatherMultiDimParamsInt32Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return GatherTestHelper<armnn::DataType::Signed32>::GatherMultiDimParamsTestImpl(
-            workloadFactory, memoryManager);
+            workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return GatherTestHelper<armnn::DataType::Float32>::GatherMultiDimParamsMultiDimIndicesTestImpl(
-        workloadFactory, memoryManager);
+        workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<armnn::Half, 4> GatherMultiDimParamsMultiDimIndicesFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return GatherTestHelper<armnn::DataType::Float16>::GatherMultiDimParamsMultiDimIndicesTestImpl(
-        workloadFactory, memoryManager);
+        workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return GatherTestHelper<armnn::DataType::QAsymmU8>::GatherMultiDimParamsMultiDimIndicesTestImpl(
-        workloadFactory, memoryManager);
+        workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return GatherTestHelper<armnn::DataType::QSymmS16>::GatherMultiDimParamsMultiDimIndicesTestImpl(
-        workloadFactory, memoryManager);
+        workloadFactory, memoryManager, tensorHandleFactory);
 }
 
 LayerTestResult<int32_t, 4> GatherMultiDimParamsMultiDimIndicesInt32Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     return GatherTestHelper<armnn::DataType::Signed32>::GatherMultiDimParamsMultiDimIndicesTestImpl(
-            workloadFactory, memoryManager);
+            workloadFactory, memoryManager, tensorHandleFactory);
 }
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp
index 0454c77..8c37f92 100644
--- a/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/GatherTestImpl.hpp
@@ -14,60 +14,75 @@
 
 LayerTestResult<float, 1> Gather1dParamsFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<armnn::Half, 1> Gather1dParamsFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<uint8_t, 1> Gather1dParamsUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 1> Gather1dParamsInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int32_t, 1> Gather1dParamsInt32Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 2> GatherMultiDimParamsFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<armnn::Half, 2> GatherMultiDimParamsFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<uint8_t, 2> GatherMultiDimParamsUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 2> GatherMultiDimParamsInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int32_t, 2> GatherMultiDimParamsInt32Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 4> GatherMultiDimParamsMultiDimIndicesFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<armnn::Half, 4> GatherMultiDimParamsMultiDimIndicesFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<uint8_t, 4> GatherMultiDimParamsMultiDimIndicesUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 4> GatherMultiDimParamsMultiDimIndicesInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int32_t, 4> GatherMultiDimParamsMultiDimIndicesInt32Test(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
index 58ac0e7..2e205dd 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.cpp
@@ -26,6 +26,7 @@
 LayerTestResult<T, 4> InstanceNormTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::TensorInfo& inputTensorInfo,
     const armnn::TensorInfo& outputTensorInfo,
     const std::vector<float>& inputValues,
@@ -42,10 +43,8 @@
     result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
                                              armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::WorkloadInfo info;
 
@@ -71,6 +70,7 @@
 LayerTestResult<T, 4> InstanceNormTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout)
 {
     // BatchSize: 2
@@ -143,6 +143,7 @@
     return InstanceNormTestImpl<ArmnnType>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         inputTensorInfo,
         outputTensorInfo,
         inputValues,
@@ -154,6 +155,7 @@
 LayerTestResult<T, 4> InstanceNormTest2(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout)
 {
     // BatchSize: 2
@@ -227,6 +229,7 @@
     return InstanceNormTestImpl<ArmnnType>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         inputTensorInfo,
         outputTensorInfo,
         inputValues,
@@ -239,31 +242,35 @@
 LayerTestResult<float, 4> InstanceNormFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout)
 {
-    return InstanceNormTest<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
+    return InstanceNormTest<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
 }
 
 LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout)
 {
-    return InstanceNormTest<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
+    return InstanceNormTest<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
 }
 
 LayerTestResult<float, 4> InstanceNormFloat32Test2(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout)
 {
-    return InstanceNormTest2<armnn::DataType::Float32>(workloadFactory, memoryManager, dataLayout);
+    return InstanceNormTest2<armnn::DataType::Float32>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
 }
 
 LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test2(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout)
 {
-    return InstanceNormTest2<armnn::DataType::Float16>(workloadFactory, memoryManager, dataLayout);
+    return InstanceNormTest2<armnn::DataType::Float16>(workloadFactory, memoryManager, tensorHandleFactory, dataLayout);
 }
diff --git a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp
index b79ba5f..d28069a 100644
--- a/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/InstanceNormalizationTestImpl.hpp
@@ -17,20 +17,24 @@
 LayerTestResult<float, 4> InstanceNormFloat32Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout);
 
 LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout);
 
 LayerTestResult<float, 4> InstanceNormFloat32Test2(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout);
 
 LayerTestResult<armnn::Half, 4> InstanceNormFloat16Test2(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     armnn::DataLayout dataLayout);
 
diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
index 6c3fe5b..227ac63 100644
--- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.cpp
@@ -23,6 +23,7 @@
 LayerTestResult<T, 4> L2NormalizationTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::TensorShape& inputOutputTensorShape,
     float scale,
     int32_t offset,
@@ -68,10 +69,8 @@
                                                         outputTensorInfo.GetQuantizationScale(),
                                                         outputTensorInfo.GetQuantizationOffset()));
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::L2NormalizationQueueDescriptor descriptor;
     descriptor.m_Parameters.m_Eps = epsilon;
@@ -107,6 +106,7 @@
 LayerTestResult<T, 4> L2NormalizationEpsilonTestCommon(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float scale,
         int32_t offset,
         float outScale,
@@ -151,6 +151,7 @@
     return L2NormalizationTestImpl<ArmnnType>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         inputOutputShape,
         scale,
         offset,
@@ -167,6 +168,7 @@
 LayerTestResult<T, 4> L2Normalization1dTestCommon(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         float scale,
         int32_t offset,
         float outScale,
@@ -237,6 +239,7 @@
     return L2NormalizationTestImpl<ArmnnType>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         inputOutputShape,
         scale,
         offset,
@@ -251,6 +254,7 @@
 LayerTestResult<T, 4> L2Normalization2dTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float scale,
     int32_t offset,
     float outScale,
@@ -296,6 +300,7 @@
     return L2NormalizationTestImpl<ArmnnType>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         inputOutputShape,
         scale,
         offset,
@@ -310,6 +315,7 @@
 LayerTestResult<T, 4> L2Normalization3dTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float scale,
     int32_t offset,
     float outScale,
@@ -375,6 +381,7 @@
     return L2NormalizationTestImpl<ArmnnType>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         inputOutputShape,
         scale,
         offset,
@@ -389,6 +396,7 @@
 LayerTestResult<T, 4> L2Normalization4dTestCommon(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float scale,
     int32_t offset,
     float outScale,
@@ -534,6 +542,7 @@
     return L2NormalizationTestImpl<ArmnnType>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         inputOutputShape,
         scale,
         offset,
@@ -549,6 +558,7 @@
 LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         const armnn::DataLayout layout)
 {
     // Dummy descriptor to get the default value of epsilon.
@@ -557,6 +567,7 @@
     return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         0.f,
         0,
         0.f,
@@ -568,11 +579,13 @@
 LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         const armnn::DataLayout layout)
 {
     return L2NormalizationEpsilonTestCommon<armnn::DataType::Float32>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         0.f,
         0,
         0.f,
@@ -584,11 +597,13 @@
 LayerTestResult<float, 4> L2Normalization1dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout)
 {
     return L2Normalization1dTestCommon<armnn::DataType::Float32>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         0.f,
         0,
         0.f,
@@ -599,11 +614,13 @@
 LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout)
 {
     return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         1.f,
         0,
         1.f,
@@ -614,11 +631,13 @@
 LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout)
 {
     return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         1.f,
         0,
         1.f / 128,
@@ -629,11 +648,13 @@
 LayerTestResult<float, 4> L2Normalization2dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout)
 {
     return L2Normalization2dTestCommon<armnn::DataType::Float32>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         0.f,
         0,
         0.f,
@@ -644,11 +665,13 @@
 LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout)
 {
     return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         1.f,
         0,
         1.f,
@@ -659,11 +682,13 @@
 LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout)
 {
     return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         1.f,
         0,
         1.f / 128,
@@ -673,7 +698,8 @@
 
 LayerTestResult<float, 2> L2Normalization2dShapeTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const armnn::DataLayout layout = armnn::DataLayout::NHWC;
     const armnn::TensorShape inputOutputTensorShape = armnn::TensorShape({ 5, 2 });
@@ -704,10 +730,8 @@
     LayerTestResult<float, 2> result(outputTensorInfo);
     result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, expectedOutputData);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::L2NormalizationQueueDescriptor descriptor;
     descriptor.m_Parameters.m_Eps = 1e-12f;
@@ -735,11 +759,13 @@
 LayerTestResult<float, 4> L2Normalization3dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout)
 {
     return L2Normalization3dTestCommon<armnn::DataType::Float32>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         0.f,
         0,
         0.f,
@@ -750,11 +776,13 @@
 LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout)
 {
     return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         1.f,
         0,
         1.f,
@@ -765,11 +793,13 @@
 LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout)
 {
     return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         1.f,
         0,
         1.f / 128,
@@ -780,11 +810,13 @@
 LayerTestResult<float, 4> L2Normalization4dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout)
 {
     return L2Normalization4dTestCommon<armnn::DataType::Float32>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         0.f,
         0,
         0.f,
@@ -795,11 +827,13 @@
 LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout)
 {
     return L2Normalization1dTestCommon<armnn::DataType::QSymmS16>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         1.f,
         0,
         1.f,
@@ -810,11 +844,13 @@
 LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout)
 {
     return L2Normalization1dTestCommon<armnn::DataType::QAsymmU8>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         1.f,
         0,
         1.f / 128,
diff --git a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp
index b50fdcd..137ab7e 100644
--- a/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/L2NormalizationTestImpl.hpp
@@ -15,73 +15,88 @@
 LayerTestResult<float, 4> L2NormalizationDefaultEpsilonTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> L2NormalizationNonDefaultEpsilonTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> L2Normalization1dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<int16_t, 4> L2Normalization1dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<uint8_t, 4> L2Normalization1dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> L2Normalization2dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<int16_t, 4> L2Normalization2dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<uint8_t, 4> L2Normalization2dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<float, 2> L2Normalization2dShapeTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 4> L2Normalization3dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<int16_t, 4> L2Normalization3dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<uint8_t, 4> L2Normalization3dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<float, 4> L2Normalization4dTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<int16_t, 4> L2Normalization4dInt16Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
 
 LayerTestResult<uint8_t, 4> L2Normalization4dUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::DataLayout layout);
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
index 979e36a..7ee7a34 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.cpp
@@ -28,6 +28,7 @@
 LayerTestResult<T, NumDims> LogSoftmaxTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const armnn::TensorInfo& inputInfo,
     const armnn::TensorInfo& outputInfo,
     const std::vector<float>& inputValues,
@@ -41,10 +42,8 @@
     result.outputExpected =
         MakeTensor<T, NumDims>(outputInfo, armnnUtils::QuantizedVector<T>(expectedOutputValues, qScale, qOffset));
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = tensorHandleFactory.CreateTensorHandle(inputInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
 
     armnn::WorkloadInfo info;
 
@@ -71,7 +70,8 @@
 template<armnn::DataType ArmnnType, typename T>
 LayerTestResult<T, 4> LogSoftmaxTest1(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
 
@@ -97,6 +97,7 @@
     return LogSoftmaxTestImpl<ArmnnType, 4>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         inputTensorInfo,
         outputTensorInfo,
         inputValues,
@@ -107,7 +108,8 @@
 template<armnn::DataType ArmnnType, typename T>
 LayerTestResult<T, 4> LogSoftmaxTest2(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
 
@@ -133,6 +135,7 @@
     return LogSoftmaxTestImpl<ArmnnType, 4>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         inputTensorInfo,
         outputTensorInfo,
         inputValues,
@@ -143,7 +146,8 @@
 template<armnn::DataType ArmnnType, typename T>
 LayerTestResult<T, 4> LogSoftmaxTest3(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
 
@@ -169,6 +173,7 @@
     return LogSoftmaxTestImpl<ArmnnType, 4>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         inputTensorInfo,
         outputTensorInfo,
         inputValues,
@@ -179,7 +184,8 @@
 template<armnn::DataType ArmnnType, typename T>
 LayerTestResult<T, 4> LogSoftmaxTest4(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const armnn::TensorShape inputOutputShape{1, 1, 2, 4};
 
@@ -205,6 +211,7 @@
     return LogSoftmaxTestImpl<ArmnnType, 4>(
         workloadFactory,
         memoryManager,
+        tensorHandleFactory,
         inputTensorInfo,
         outputTensorInfo,
         inputValues,
@@ -215,39 +222,47 @@
 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
 LogSoftmaxTest1<armnn::DataType::Float32>(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
 LogSoftmaxTest2<armnn::DataType::Float32>(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
 LogSoftmaxTest3<armnn::DataType::Float32>(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float32>, 4>
 LogSoftmaxTest4<armnn::DataType::Float32>(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
 LogSoftmaxTest1<armnn::DataType::Float16>(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
 LogSoftmaxTest2<armnn::DataType::Float16>(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
 LogSoftmaxTest3<armnn::DataType::Float16>(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template LayerTestResult<armnn::ResolveType<armnn::DataType::Float16>, 4>
 LogSoftmaxTest4<armnn::DataType::Float16>(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp
index af18d69..1f4cc89 100644
--- a/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/LogSoftmaxTestImpl.hpp
@@ -15,19 +15,23 @@
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> LogSoftmaxTest1(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> LogSoftmaxTest2(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template<armnn::DataType ArmnnType,typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> LogSoftmaxTest3(
     armnn::IWorkloadFactory& workloadFactory,
-   const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 template<armnn::DataType ArmnnType,typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> LogSoftmaxTest4(
     armnn::IWorkloadFactory& workloadFactory,
-   const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 946764b..8f39f42 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -136,6 +136,7 @@
 LstmNoCifgNoPeepholeNoProjectionTestImpl(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         const boost::multi_array<T, 2>& input,
         const boost::multi_array<T, 2>& outputExpected,
         float qScale = 0.0f,
@@ -183,20 +184,19 @@
     outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
     ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, outputVector);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
-            workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
-            workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
 
-    std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> scratchHandle =
+            tensorHandleFactory.CreateTensorHandle(scratchBufferTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
-            workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(outputStateOutTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
-            workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+            tensorHandleFactory.CreateTensorHandle(cellStateOutTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::LstmQueueDescriptor data;
     armnn::WorkloadInfo info;
@@ -340,6 +340,7 @@
 LayerTestResult<T, 2>
 LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                   const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                                  const armnn::ITensorHandleFactory& tensorHandleFactory,
                                                   const boost::multi_array<T, 2>& input,
                                                   const boost::multi_array<T, 2>& outputExpected,
                                                   float qScale = 0.0f,
@@ -387,20 +388,19 @@
     outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
     ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, outputVector);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
-            workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
-            workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
 
-    std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> scratchHandle =
+            tensorHandleFactory.CreateTensorHandle(scratchBufferTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
-            workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(outputStateOutTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
-            workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+            tensorHandleFactory.CreateTensorHandle(cellStateOutTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::LstmQueueDescriptor data;
     armnn::WorkloadInfo info;
@@ -1057,6 +1057,7 @@
 LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         const boost::multi_array<T, 2>& input,
         const boost::multi_array<T, 2>& outputExpected,
         float qScale = 0.0f,
@@ -1220,24 +1221,22 @@
     LayerTestResult<T, 2> ret3(outputTensorInfo);
     ret3.outputExpected = MakeTensor<T, 2>(outputTensorInfo, outputData);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     // Prepare the inputs and outputs for the workload
     std::unique_ptr<armnn::ITensorHandle> inputHandle =
-            workloadFactory.CreateTensorHandle(inputTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
-            workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
-            workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
 
     std::unique_ptr<armnn::ITensorHandle> scratchBufferHandle =
-            workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(scratchBufferTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
-            workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(outputStateOutTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
-            workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(cellStateOutTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle =
-            workloadFactory.CreateTensorHandle(outputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+            tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::WorkloadInfo info;
     AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
@@ -1284,6 +1283,7 @@
 LayerTestResult<T, 2>
 LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
                                                   const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+                                                  const armnn::ITensorHandleFactory& tensorHandleFactory,
                                                   const boost::multi_array<T, 2>& input,
                                                   const boost::multi_array<T, 2>& outputExpected,
                                                   float qScale = 0.0f,
@@ -1331,20 +1331,19 @@
     outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
     ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
-            workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
-            workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
 
-    std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> scratchHandle =
+            tensorHandleFactory.CreateTensorHandle(scratchBufferTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
-            workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
+            tensorHandleFactory.CreateTensorHandle(outputStateOutTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
-            workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+            tensorHandleFactory.CreateTensorHandle(cellStateOutTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::LstmQueueDescriptor data;
     armnn::WorkloadInfo info;
@@ -1556,6 +1555,7 @@
 LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     const boost::multi_array<uint8_t, 2>& input,
     const boost::multi_array<uint8_t, 2>& outputExpected)
 {
@@ -1617,18 +1617,16 @@
     outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
     ret.outputExpected = MakeTensor<uint8_t, 2>(outputStateInfo, outputVector);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     // Create tensor handles
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
-            workloadFactory.CreateTensorHandle(cellStateInfo);
+            tensorHandleFactory.CreateTensorHandle(cellStateInfo);
     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
-            workloadFactory.CreateTensorHandle(outputStateInfo);
+            tensorHandleFactory.CreateTensorHandle(outputStateInfo);
 
     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
-            workloadFactory.CreateTensorHandle(cellStateInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputStateInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+            tensorHandleFactory.CreateTensorHandle(cellStateInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputStateInfo);
 
     armnn::QuantizedLstmQueueDescriptor data;
     armnn::WorkloadInfo info;
@@ -1746,6 +1744,7 @@
 LayerTestResult<int8_t, 2> QLstmTestImpl(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         const boost::multi_array<int8_t, 2>& input,
         const boost::multi_array<int8_t, 2>& outputExpected)
 {
@@ -1828,19 +1827,18 @@
     outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
     ret.outputExpected = MakeTensor<int8_t, 2>(outputStateInfo, outputVector);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     // Create tensor handles
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
-            workloadFactory.CreateTensorHandle(cellStateInfo);
+            tensorHandleFactory.CreateTensorHandle(cellStateInfo);
     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
-            workloadFactory.CreateTensorHandle(outputStateInfo);
+            tensorHandleFactory.CreateTensorHandle(outputStateInfo);
 
-    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle = workloadFactory.CreateTensorHandle(outputStateInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
+            tensorHandleFactory.CreateTensorHandle(outputStateInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
-            workloadFactory.CreateTensorHandle(cellStateInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputStateInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+            tensorHandleFactory.CreateTensorHandle(cellStateInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputStateInfo);
 
     armnn::QLstmQueueDescriptor data;
     armnn::WorkloadInfo info;
@@ -1984,6 +1982,7 @@
 LayerTestResult<int8_t, 2> QLstmTestImpl1(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         const boost::multi_array<int8_t, 2>& input,
         const boost::multi_array<int8_t, 2>& outputExpected)
 {
@@ -2068,19 +2067,18 @@
     outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
     ret.outputExpected = MakeTensor<int8_t, 2>(outputStateInfo, outputVector);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     // Create tensor handles
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
-            workloadFactory.CreateTensorHandle(cellStateInfo);
+            tensorHandleFactory.CreateTensorHandle(cellStateInfo);
     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
-            workloadFactory.CreateTensorHandle(outputStateInfo);
+            tensorHandleFactory.CreateTensorHandle(outputStateInfo);
 
-    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle = workloadFactory.CreateTensorHandle(outputStateInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
+            tensorHandleFactory.CreateTensorHandle(outputStateInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
-            workloadFactory.CreateTensorHandle(cellStateInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputStateInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+            tensorHandleFactory.CreateTensorHandle(cellStateInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputStateInfo);
 
     armnn::QLstmQueueDescriptor data;
     armnn::WorkloadInfo info;
@@ -2256,6 +2254,7 @@
 LayerTestResult<int8_t, 2> QLstmTestImpl2(
         armnn::IWorkloadFactory& workloadFactory,
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::ITensorHandleFactory& tensorHandleFactory,
         const boost::multi_array<int8_t, 2>& input,
         const boost::multi_array<int8_t, 2>& outputExpected)
 {
@@ -2340,19 +2339,18 @@
     outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
     ret.outputExpected = MakeTensor<int8_t, 2>(outputStateInfo, outputVector);
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
     // Create tensor handles
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
-            workloadFactory.CreateTensorHandle(cellStateInfo);
+            tensorHandleFactory.CreateTensorHandle(cellStateInfo);
     std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
-            workloadFactory.CreateTensorHandle(outputStateInfo);
+            tensorHandleFactory.CreateTensorHandle(outputStateInfo);
 
-    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle = workloadFactory.CreateTensorHandle(outputStateInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
+            tensorHandleFactory.CreateTensorHandle(outputStateInfo);
     std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
-            workloadFactory.CreateTensorHandle(cellStateInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputStateInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+            tensorHandleFactory.CreateTensorHandle(cellStateInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputStateInfo);
 
     armnn::QLstmQueueDescriptor data;
     armnn::WorkloadInfo info;
@@ -2661,7 +2659,8 @@
 
 LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
@@ -2672,12 +2671,13 @@
             {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
              -0.42734814f, -0.00478661f,  0.13455015f, -0.03560682f}));
     return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, input, expectedOutput);
+        workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
 }
 
 LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
@@ -2694,12 +2694,13 @@
              0.00223253f,   -0.00957321f, 0.0210624f,   0.013331f,    0.0150954f,
              0.02168f}));
     return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, input, expectedOutput);
+        workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
 }
 
 LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
@@ -2711,12 +2712,13 @@
               -0.0185422f,   0.11281417f,  0.24466537f, -0.1826292f}}));
 
     return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
-        workloadFactory, memoryManager, input, expectedOutput);
+        workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
 }
 
 LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
     boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
@@ -2728,12 +2730,13 @@
             {  0.0244077f,  0.128027f, -0.00170918f,    //batch 0
              -0.00692428f, 0.0848741f,    0.063445f})); //batch 1
     return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
-            workloadFactory, memoryManager, input, expectedOutput);
+        workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
 }
 
 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const float qScale = 1.0f;
     const int32_t qOffset = 0;
@@ -2757,13 +2760,14 @@
             qScale, qOffset));
 
     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
-        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
+        workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype);
 
 }
 
 LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const float qScale = 1.0f;
     const int32_t qOffset = 0;
@@ -2789,12 +2793,13 @@
                 qScale, qOffset));
 
     return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
-        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
+        workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype);
 }
 
 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const float qScale = 2.0f;
     const int32_t qOffset = 0;
@@ -2831,12 +2836,13 @@
                 qScale, qOffset));
 
     return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
-        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
+        workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, constantDatatype);
 }
 
 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     const float qScale = 1.0f;
     const int32_t qOffset = 0;
@@ -2860,7 +2866,7 @@
                 qScale, qOffset));
 
     return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
-        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
+        workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput, qScale, qOffset, datatype);
 }
 
 //
@@ -2869,7 +2875,8 @@
 
 LayerTestResult<uint8_t, 2> QuantizedLstmTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QAsymmU8);
     boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
@@ -2879,13 +2886,14 @@
     boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
         {140, 151, 146, 112, 136, 156, 142, 112 }));
 
-    return QuantizedLstmTestImpl(workloadFactory, memoryManager, input, expectedOutput);
+    return QuantizedLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
 }
 
 // QLSTM
 LayerTestResult<int8_t, 2> QLstmTest(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8);
     boost::multi_array<int8_t, 2> input = MakeTensor<int8_t, 2>(inputDesc, std::vector<int8_t>(
@@ -2895,12 +2903,13 @@
     boost::multi_array<int8_t, 2> expectedOutput = MakeTensor<int8_t, 2>(outputDesc, std::vector<int8_t>(
             {-15, 21, 14, 20, -15, 15, 5, 27}));
 
-    return QLstmTestImpl(workloadFactory, memoryManager, input, expectedOutput);
+    return QLstmTestImpl(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
 }
 
 LayerTestResult<int8_t, 2> QLstmTest1(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8);
     boost::multi_array<int8_t, 2> input = MakeTensor<int8_t, 2>(inputDesc, std::vector<int8_t>(
@@ -2910,12 +2919,13 @@
     boost::multi_array<int8_t, 2> expectedOutput = MakeTensor<int8_t, 2>(outputDesc, std::vector<int8_t>(
             {127, 127, -108, -67, 127, 127}));
 
-    return QLstmTestImpl1(workloadFactory, memoryManager, input, expectedOutput);
+    return QLstmTestImpl1(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
 }
 
 LayerTestResult<int8_t, 2> QLstmTest2(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
     armnn::TensorInfo inputDesc({2, 5}, armnn::DataType::QAsymmS8);
     boost::multi_array<int8_t, 2> input = MakeTensor<int8_t, 2>(inputDesc, std::vector<int8_t>(
@@ -2925,5 +2935,5 @@
     boost::multi_array<int8_t, 2> expectedOutput = MakeTensor<int8_t, 2>(outputDesc, std::vector<int8_t>(
             {127, 127, 127, -128, 127, 127}));
 
-    return QLstmTestImpl2(workloadFactory, memoryManager, input, expectedOutput);
+    return QLstmTestImpl2(workloadFactory, memoryManager, tensorHandleFactory, input, expectedOutput);
 }
\ No newline at end of file
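Note: every hunk above applies the same mechanical substitution. A condensed sketch of the resulting pattern is shown below; it assumes the includes already present in LstmTestImpl.cpp (the armnn Tensor/ITensorHandle headers and the workload test utilities that provide AddInputToWorkload), and the helper name WireUpLstmInput is illustrative only, not part of the patch.

std::unique_ptr<armnn::ITensorHandle> WireUpLstmInput(
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const armnn::TensorInfo& inputTensorInfo,
    armnn::LstmQueueDescriptor& data,
    armnn::WorkloadInfo& info)
{
    // Handle creation now goes through the ITensorHandleFactory instead of the deprecated
    // IWorkloadFactory::CreateTensorHandle overloads, so the ARMNN_NO_DEPRECATE_WARN_BEGIN/END
    // guards are no longer needed.
    std::unique_ptr<armnn::ITensorHandle> inputHandle =
            tensorHandleFactory.CreateTensorHandle(inputTensorInfo);

    // Registration on the queue descriptor is unchanged; only the handle's origin differs.
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());

    // The caller keeps the handle alive for the lifetime of the workload.
    return inputHandle;
}
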
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.hpp
index 6e29345..d27ddd6 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.hpp
@@ -21,35 +21,43 @@
 
 LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
-        armnn::IWorkloadFactory& workloadFactory,
-        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 //
 // QuantizedLstm
@@ -57,7 +65,8 @@
 
 LayerTestResult<uint8_t, 2> QuantizedLstmTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 //
 // QLstm
@@ -65,12 +74,15 @@
 
 LayerTestResult<int8_t, 2> QLstmTest(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int8_t, 2> QLstmTest1(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
 
 LayerTestResult<int8_t, 2> QLstmTest2(
     armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
\ No newline at end of file
+    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+    const armnn::ITensorHandleFactory& tensorHandleFactory);
\ No newline at end of file
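For reference, a minimal caller for the updated declarations above. This free function is illustrative only; in the backend test suites below the extra argument is supplied by the ARMNN_AUTO_TEST_CASE_WITH_THF harness rather than written out by hand.

#include "LstmTestImpl.hpp"  // declarations updated by this patch

LayerTestResult<float, 2> RunLstmNoCifgNoPeepholeNoProjection(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    // The third argument is the ITensorHandleFactory parameter added throughout this header.
    return LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
        workloadFactory, memoryManager, tensorHandleFactory);
}
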
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 67ae73e..ec8f71e 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -339,33 +339,33 @@
 ARMNN_AUTO_TEST_CASE(BatchNormFloat32Nhwc, BatchNormFloat32NhwcTest)
 
 // InstanceNormalization
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nchw, InstanceNormFloat16Test, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nchw, InstanceNormFloat16Test, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nhwc, InstanceNormFloat32Test, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nhwc, InstanceNormFloat16Test, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nhwc, InstanceNormFloat32Test, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nhwc, InstanceNormFloat16Test, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nchw2, InstanceNormFloat32Test2, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nchw2, InstanceNormFloat16Test2, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw2, InstanceNormFloat32Test2, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nchw2, InstanceNormFloat16Test2, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nhwc2, InstanceNormFloat32Test2, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nhwc2, InstanceNormFloat16Test2, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nhwc2, InstanceNormFloat32Test2, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nhwc2, InstanceNormFloat16Test2, DataLayout::NHWC);
 
 // L2 Normalization
-ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1d, L2Normalization1dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2d, L2Normalization2dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3d, L2Normalization3dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4d, L2Normalization4dTest, DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1dNhwc, L2Normalization1dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dNhwc, L2Normalization2dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3dNhwc, L2Normalization3dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4dNhwc, L2Normalization4dTest, DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE(L2Normalization2dShape, L2Normalization2dShapeTest);
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dShape, L2Normalization2dShapeTest);
 
-ARMNN_AUTO_TEST_CASE(L2NormalizationDefaultEpsilon, L2NormalizationDefaultEpsilonTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefaultEpsilonTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2NormalizationDefaultEpsilon, L2NormalizationDefaultEpsilonTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefaultEpsilonTest, DataLayout::NCHW)
 
 // Constant
 ARMNN_AUTO_TEST_CASE_WITH_THF(Constant, ConstantTest)
@@ -473,10 +473,10 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFloor, SimpleFloorTest<DataType::Float32>)
 
 // Gather
-ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat32, Gather1dParamsFloat32Test)
-ARMNN_AUTO_TEST_CASE(Gather1dParamsUint8, Gather1dParamsUint8Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsFloat32, Gather1dParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsUint8, Gather1dParamsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
 
 // Reshape
 ARMNN_AUTO_TEST_CASE(SimpleReshapeFloat32, SimpleReshapeTest<DataType::Float32>)
@@ -519,21 +519,21 @@
 ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
 
 // Lstm
-ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
-                     LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest)
-ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgNoPeepholeNoProjection,
-                     LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest)
-ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgWithPeepholeWithProjection,
-                     LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
+                              LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgNoPeepholeNoProjection,
+                              LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjection,
+                              LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest)
 
-ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNorm,
-                     LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNorm,
+                              LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest)
 
 // QLstm
-ARMNN_AUTO_TEST_CASE(QLstm, QLstmTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm, QLstmTest)
 
 // QuantizedLstm
-ARMNN_AUTO_TEST_CASE(QuantizedLstm, QuantizedLstmTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizedLstm, QuantizedLstmTest)
 
 // Convert from Float16 to Float32
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test)
@@ -665,7 +665,7 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f)
 
 // LogSoftmax
-ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
 
 // Space To Batch Nd
 ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleFloat32, SpaceToBatchNdSimpleFloat32Test)
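A hypothetical, heavily simplified illustration of what the macro switch above changes at each call site. The real ARMNN_AUTO_TEST_CASE / ARMNN_AUTO_TEST_CASE_WITH_THF definitions live in the backend test headers and may differ in detail, and the Get* accessors below are placeholders rather than real ArmNN APIs.

// Placeholder accessors, hypothetical only; real suites obtain these from the backend fixture.
armnn::IWorkloadFactory& GetFactory();
const armnn::IBackendInternal::IMemoryManagerSharedPtr& GetMemoryManager();
const armnn::ITensorHandleFactory& GetTensorHandleFactory();

// Plain variant: the test function receives only the workload factory and memory manager.
#define SKETCH_TEST_CASE(TestName, TestFunction) \
    void TestName() { TestFunction(GetFactory(), GetMemoryManager()); }

// _WITH_THF variant: the tensor handle factory is forwarded as an extra argument.
#define SKETCH_TEST_CASE_WITH_THF(TestName, TestFunction) \
    void TestName() { TestFunction(GetFactory(), GetMemoryManager(), GetTensorHandleFactory()); }
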
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index 4c0d6a6..40f4388 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -503,7 +503,7 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f)
 
 // LogSoftmax
-ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
 
 // Space To Batch Nd
 ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleFloat32, SpaceToBatchNdSimpleFloat32Test)
@@ -601,11 +601,11 @@
 ARMNN_AUTO_TEST_CASE(BatchNormFloat32Nhwc, BatchNormFloat32NhwcTest)
 
 // InstanceNormalization
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nhwc, InstanceNormFloat32Test, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nhwc, InstanceNormFloat32Test, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nchw2, InstanceNormFloat32Test2, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nhwc2, InstanceNormFloat32Test2, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw2, InstanceNormFloat32Test2, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nhwc2, InstanceNormFloat32Test2, DataLayout::NHWC);
 
 // Constant
 ARMNN_AUTO_TEST_CASE_WITH_THF(Constant, ConstantTest)
@@ -654,29 +654,29 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test, false)
 
 // L2 Normalization
-ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1d, L2Normalization1dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2d, L2Normalization2dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3d, L2Normalization3dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4d, L2Normalization4dTest, DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1dNhwc, L2Normalization1dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dNhwc, L2Normalization2dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3dNhwc, L2Normalization3dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4dNhwc, L2Normalization4dTest, DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE(L2Normalization2dShape, L2Normalization2dShapeTest);
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dShape, L2Normalization2dShapeTest);
 
-ARMNN_AUTO_TEST_CASE(L2NormalizationDefaultEpsilon, L2NormalizationDefaultEpsilonTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefaultEpsilonTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2NormalizationDefaultEpsilon, L2NormalizationDefaultEpsilonTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefaultEpsilonTest, DataLayout::NCHW)
 
 // Floor
 ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFloor, SimpleFloorTest<DataType::Float32>)
 
 // Gather
-ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat32, Gather1dParamsFloat32Test)
-ARMNN_AUTO_TEST_CASE(Gather1dParamsUint8, Gather1dParamsUint8Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsFloat32, Gather1dParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsUint8, Gather1dParamsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
 
 // Equal
 ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimple,            EqualSimpleTest)
@@ -769,22 +769,22 @@
 ARMNN_AUTO_TEST_CASE(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
 
 // Lstm
-ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
-                     LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest)
-ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgNoPeepholeNoProjection,
-                     LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest)
-ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgWithPeepholeWithProjection,
-                     LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest)
-ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNorm,
-                     LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
+                              LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgNoPeepholeNoProjection,
+                              LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjection,
+                              LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNorm,
+                              LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest)
 
 // QLstm
-ARMNN_AUTO_TEST_CASE(QLstm, QLstmTest)
-ARMNN_AUTO_TEST_CASE(QLstm1, QLstmTest1)
-ARMNN_AUTO_TEST_CASE(QLstm2, QLstmTest2)
+ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm, QLstmTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm1, QLstmTest1)
+ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm2, QLstmTest2)
 
 // QuantizedLstm
-ARMNN_AUTO_TEST_CASE(QuantizedLstm, QuantizedLstmTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizedLstm, QuantizedLstmTest)
 
 // Mean
 ARMNN_AUTO_TEST_CASE(MeanSimpleFloat32, MeanSimpleTest<DataType::Float32>)
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 4feba22..fd77d25 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -453,17 +453,17 @@
 ARMNN_AUTO_TEST_CASE(ConstantLinearActivationInt16, ConstantLinearActivationInt16Test)
 
 // InstanceNormalization
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nchw, InstanceNormFloat16Test, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nchw, InstanceNormFloat16Test, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nhwc, InstanceNormFloat32Test, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nhwc, InstanceNormFloat16Test, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nhwc, InstanceNormFloat32Test, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nhwc, InstanceNormFloat16Test, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nchw2, InstanceNormFloat32Test2, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nchw2, InstanceNormFloat16Test2, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw2, InstanceNormFloat32Test2, DataLayout::NCHW);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nchw2, InstanceNormFloat16Test2, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat32Nhwc2, InstanceNormFloat32Test2, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(InstanceNormFloat16Nhwc2, InstanceNormFloat16Test2, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nhwc2, InstanceNormFloat32Test2, DataLayout::NHWC);
+ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nhwc2, InstanceNormFloat16Test2, DataLayout::NHWC);
 
 // Normalization
 ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
@@ -1243,51 +1243,51 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(FakeQuantization, FakeQuantizationTest)
 
 // L2 Normalization
-ARMNN_AUTO_TEST_CASE(L2Normalization1d, L2Normalization1dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization2d, L2Normalization2dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization3d, L2Normalization3dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization4d, L2Normalization4dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1d, L2Normalization1dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2d, L2Normalization2dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3d, L2Normalization3dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4d, L2Normalization4dTest, DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE(L2Normalization1dInt16, L2Normalization1dInt16Test, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization2dInt16, L2Normalization2dInt16Test, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization3dInt16, L2Normalization3dInt16Test, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization4dInt16, L2Normalization4dInt16Test, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1dInt16, L2Normalization1dInt16Test, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dInt16, L2Normalization2dInt16Test, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3dInt16, L2Normalization3dInt16Test, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4dInt16, L2Normalization4dInt16Test, DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE(L2Normalization1dUint8, L2Normalization1dUint8Test, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization2dUint8, L2Normalization2dUint8Test, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization3dUint8, L2Normalization3dUint8Test, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2Normalization4dUint8, L2Normalization4dUint8Test, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1dUint8, L2Normalization1dUint8Test, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dUint8, L2Normalization2dUint8Test, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3dUint8, L2Normalization3dUint8Test, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4dUint8, L2Normalization4dUint8Test, DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE(L2Normalization1dNhwc, L2Normalization1dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization2dNhwc, L2Normalization2dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1dNhwc, L2Normalization1dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dNhwc, L2Normalization2dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3dNhwc, L2Normalization3dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4dNhwc, L2Normalization4dTest, DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE(L2Normalization1dInt16Nhwc, L2Normalization1dInt16Test, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization2dInt16Nhwc, L2Normalization2dInt16Test, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization3dInt16Nhwc, L2Normalization3dInt16Test, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization4dInt16Nhwc, L2Normalization4dInt16Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1dInt16Nhwc, L2Normalization1dInt16Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dInt16Nhwc, L2Normalization2dInt16Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3dInt16Nhwc, L2Normalization3dInt16Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4dInt16Nhwc, L2Normalization4dInt16Test, DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE(L2Normalization1dUint8Nhwc, L2Normalization1dUint8Test, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization2dUint8Nhwc, L2Normalization2dUint8Test, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization3dUint8Nhwc, L2Normalization3dUint8Test, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(L2Normalization4dUint8Nhwc, L2Normalization4dUint8Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1dUint8Nhwc, L2Normalization1dUint8Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dUint8Nhwc, L2Normalization2dUint8Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3dUint8Nhwc, L2Normalization3dUint8Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4dUint8Nhwc, L2Normalization4dUint8Test, DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE(L2Normalization2dShape, L2Normalization2dShapeTest);
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dShape, L2Normalization2dShapeTest);
 
-ARMNN_AUTO_TEST_CASE(L2NormalizationDefaultEpsilon, L2NormalizationDefaultEpsilonTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefaultEpsilonTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2NormalizationDefaultEpsilon, L2NormalizationDefaultEpsilonTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE_WITH_THF(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefaultEpsilonTest, DataLayout::NCHW)
 
 // LogSoftmax
-ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_2, LogSoftmaxTest2<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_3, LogSoftmaxTest3<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat32_4, LogSoftmaxTest4<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat32_2, LogSoftmaxTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat32_3, LogSoftmaxTest3<DataType::Float32>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat32_4, LogSoftmaxTest4<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat16_1, LogSoftmaxTest1<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat16_2, LogSoftmaxTest2<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat16_3, LogSoftmaxTest3<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE(LogSoftmaxFloat16_4, LogSoftmaxTest4<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_1, LogSoftmaxTest1<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_2, LogSoftmaxTest2<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_3, LogSoftmaxTest3<DataType::Float16>)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat16_4, LogSoftmaxTest4<DataType::Float16>)
 
 // Pad
 ARMNN_AUTO_TEST_CASE(PadBFloat162d, PadBFloat162dTest)
@@ -1430,29 +1430,29 @@
 BOOST_AUTO_TEST_CASE(LstmUtilsVectorBatchVectorAdd) {
                      LstmUtilsVectorBatchVectorAddTest(); }
 
-ARMNN_AUTO_TEST_CASE(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
-                     LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest)
-ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgNoPeepholeNoProjection,
-                     LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest)
-ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgWithPeepholeWithProjection,
-                     LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
+                              LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgNoPeepholeNoProjection,
+                              LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjection,
+                              LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest)
 
-ARMNN_AUTO_TEST_CASE(LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNorm,
-                     LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNorm,
+                              LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest)
 
-ARMNN_AUTO_TEST_CASE(LstmLayerInt16NoCifgNoPeepholeNoProjection,
-                     LstmLayerInt16NoCifgNoPeepholeNoProjectionTest)
-ARMNN_AUTO_TEST_CASE(LstmLayerInt16WithCifgWithPeepholeNoProjection,
-                     LstmLayerInt16WithCifgWithPeepholeNoProjectionTest)
-ARMNN_AUTO_TEST_CASE(LstmLayerInt16NoCifgWithPeepholeWithProjection,
-                     LstmLayerInt16NoCifgWithPeepholeWithProjectionTest)
-ARMNN_AUTO_TEST_CASE(LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16Constant,
-                     LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerInt16NoCifgNoPeepholeNoProjection,
+                              LstmLayerInt16NoCifgNoPeepholeNoProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerInt16WithCifgWithPeepholeNoProjection,
+                              LstmLayerInt16WithCifgWithPeepholeNoProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerInt16NoCifgWithPeepholeWithProjection,
+                              LstmLayerInt16NoCifgWithPeepholeWithProjectionTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16Constant,
+                              LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest)
 
 // QLstm
-ARMNN_AUTO_TEST_CASE(QLstm, QLstmTest)
-ARMNN_AUTO_TEST_CASE(QLstm1, QLstmTest1)
-ARMNN_AUTO_TEST_CASE(QLstm2, QLstmTest2)
+ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm, QLstmTest)
+ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm1, QLstmTest1)
+ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm2, QLstmTest2)
 
 // Convert from BFloat16 to Float32
 ARMNN_AUTO_TEST_CASE_WITH_THF(ConvertBf16ToFp32, ConvertBf16ToFp32Test)
@@ -1805,21 +1805,23 @@
 ARMNN_AUTO_TEST_CASE(Debug1dQSymm16, Debug1dInt16Test)
 
 // Gather
-ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat32, Gather1dParamsFloat32Test)
-ARMNN_AUTO_TEST_CASE(Gather1dParamsFloat16, Gather1dParamsFloat16Test)
-ARMNN_AUTO_TEST_CASE(Gather1dParamsUint8, Gather1dParamsUint8Test)
-ARMNN_AUTO_TEST_CASE(Gather1dParamsInt16, Gather1dParamsInt16Test)
-ARMNN_AUTO_TEST_CASE(Gather1dParamsInt32, Gather1dParamsInt32Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsFloat16, GatherMultiDimParamsFloat16Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsInt16, GatherMultiDimParamsInt16Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsInt32, GatherMultiDimParamsInt32Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesFloat32, GatherMultiDimParamsMultiDimIndicesFloat32Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesFloat16, GatherMultiDimParamsMultiDimIndicesFloat16Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesUint8, GatherMultiDimParamsMultiDimIndicesUint8Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesInt16, GatherMultiDimParamsMultiDimIndicesInt16Test)
-ARMNN_AUTO_TEST_CASE(GatherMultiDimParamsMultiDimIndicesInt32, GatherMultiDimParamsMultiDimIndicesInt32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsFloat32, Gather1dParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsFloat16, Gather1dParamsFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsUint8, Gather1dParamsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsInt16, Gather1dParamsInt16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsInt32, Gather1dParamsInt32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsFloat16, GatherMultiDimParamsFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsInt16, GatherMultiDimParamsInt16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsInt32, GatherMultiDimParamsInt32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsMultiDimIndicesFloat32,
+                              GatherMultiDimParamsMultiDimIndicesFloat32Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsMultiDimIndicesFloat16,
+                              GatherMultiDimParamsMultiDimIndicesFloat16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsMultiDimIndicesUint8, GatherMultiDimParamsMultiDimIndicesUint8Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsMultiDimIndicesInt16, GatherMultiDimParamsMultiDimIndicesInt16Test)
+ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsMultiDimIndicesInt32, GatherMultiDimParamsMultiDimIndicesInt32Test)
 
 // Abs
 ARMNN_AUTO_TEST_CASE(Abs2d, Abs2dTest<DataType::Float32>)