IVGCVSW-5231 Remove deprecated CreateTensorHandle calls from tests guarded by NO_DEPRECATE_WARN

 * Done for all elementwise layers, Activation, BatchNorm and BatchToSpace (caller-side sketch of the new call pattern below)
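
   A minimal caller-side sketch of the new pattern. The fixture helper name is
   a hypothetical stand-in for whatever backend-specific setup each test uses;
   only the extra armnn::ITensorHandleFactory parameter and its
   CreateTensorHandle calls are part of this change.

       // Illustrative only: a per-backend test passes the tensor handle factory
       // through to the shared helper, which now creates its handles from it
       // instead of the deprecated IWorkloadFactory::CreateTensorHandle.
       const armnn::ITensorHandleFactory& tensorHandleFactory =
           GetTensorHandleFactory();  // hypothetical backend fixture helper

       std::unique_ptr<armnn::ITensorHandle> inputHandle0 =
           tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
       std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
           tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
       std::unique_ptr<armnn::ITensorHandle> outputHandle =
           tensorHandleFactory.CreateTensorHandle(outputTensorInfo);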

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: Id1d15a0960233026aecf7a07e0d3f006e07e4abf
diff --git a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
index 79eb2e4..ec5bfb0 100644
--- a/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/ElementwiseTestImpl.hpp
@@ -51,6 +51,7 @@
     int quantOffset1,
     const unsigned int outShape[NumDims],
     std::vector<TOutput> outValues,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float outQuantScale,
     int outQuantOffset)
 {
@@ -77,11 +78,9 @@
         ret.compareBoolean = true;
     }
 
-    ARMNN_NO_DEPRECATE_WARN_BEGIN
-    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
-    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-    ARMNN_NO_DEPRECATE_WARN_END
+    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo0);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = tensorHandleFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
 
     Descriptor data;
     armnn::WorkloadInfo info;
@@ -123,6 +122,7 @@
     int quantOffset1,
     const unsigned int outShape[NumDims],
     std::vector<T> outValues,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float outQuantScale,
     int outQuantOffset)
 {
@@ -139,6 +139,7 @@
         quantOffset1,
         outShape,
         outValues,
+        tensorHandleFactory,
         outQuantScale,
         outQuantOffset);
 }
@@ -158,6 +159,7 @@
     std::vector<TInput> values1,
     const unsigned int outShape[NumDims],
     std::vector<TOutput> outValues,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float quantScale = 1.0f,
     int quantOffset = 0)
 {
@@ -174,6 +176,7 @@
         quantOffset,
         outShape,
         outValues,
+        tensorHandleFactory,
         quantScale,
         quantOffset);
 }
@@ -191,6 +194,7 @@
     std::vector<T> values1,
     const unsigned int outShape[NumDims],
     std::vector<T> outValues,
+    const armnn::ITensorHandleFactory& tensorHandleFactory,
     float quantScale = 1.0f,
     int quantOffset = 0)
 {
@@ -203,6 +207,7 @@
         values1,
         outShape,
         outValues,
+        tensorHandleFactory,
         quantScale,
         quantOffset);
 }
\ No newline at end of file