IVGCVSW-2467 Remove GetDataType<T> function

Replace the run-time armnn::GetDataType<T>() calls in the softmax test
implementations with an armnn::DataType non-type template parameter,
defaulting the element type T via armnn::ResolveType<ArmnnType>.

Change-Id: I7359617a307b9abb4c30b3d5f2364dc6d0f828f0
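
The hunks below swap a type-to-enum lookup (GetDataType<T>()) for an
enum-to-type one: the armnn::DataType enumerator becomes the template
parameter and T is derived from it through armnn::ResolveType. The
standalone sketch that follows illustrates the same trait pattern with
simplified stand-in names (DataType, ResolveTypeImpl, ResolveType and
DescribeTensor here are hypothetical minimal versions, not the armnn
definitions):

    // Minimal sketch of mapping an enum value to a C++ type at compile time.
    #include <cstdint>
    #include <iostream>

    enum class DataType { Float32, QuantisedAsymm8 };

    // Trait that resolves a DataType enumerator to its storage type.
    template<DataType DT> struct ResolveTypeImpl;
    template<> struct ResolveTypeImpl<DataType::Float32>         { using Type = float; };
    template<> struct ResolveTypeImpl<DataType::QuantisedAsymm8> { using Type = uint8_t; };

    template<DataType DT>
    using ResolveType = typename ResolveTypeImpl<DT>::Type;

    // Before: the enum had to be recovered from T at run time (GetDataType<T>()).
    // After: the enumerator is the template argument and T follows from it,
    // so no reverse lookup is needed.
    template<DataType ArmnnType, typename T = ResolveType<ArmnnType>>
    void DescribeTensor()
    {
        std::cout << "element size: " << sizeof(T)
                  << ", enum value: " << static_cast<int>(ArmnnType) << '\n';
    }

    int main()
    {
        DescribeTensor<DataType::Float32>();         // T deduced as float
        DescribeTensor<DataType::QuantisedAsymm8>(); // T deduced as uint8_t
    }

With this shape, call sites would presumably name the data type
explicitly, e.g. SimpleSoftmaxTestImpl<armnn::DataType::Float32>(...),
and the LayerTestResult element type T is filled in automatically
(those call-site updates are not part of this hunk).
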
diff --git a/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp b/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp
index 97199e3..25ceda1 100644
--- a/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp
@@ -19,7 +19,7 @@
 
 #include <algorithm>
 
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 2> SimpleSoftmaxTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -32,13 +32,13 @@
 
     unsigned int inputShape[] = { 2, 4 };
 
-    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+    inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
     float qScale = 1.f / 256.f;
     int qOffset = 0;
     inputTensorInfo.SetQuantizationScale(qScale);
     inputTensorInfo.SetQuantizationOffset(qOffset);
 
-    outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+    outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
     outputTensorInfo.SetQuantizationScale(qScale);
     outputTensorInfo.SetQuantizationOffset(qOffset);
 
@@ -87,7 +87,7 @@
     return ret;
 }
 
-template<typename T>
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 2> CompareSoftmaxTestImpl(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
@@ -103,8 +103,8 @@
 
     unsigned int inputShape[] = { batchSize, channels };
 
-    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
-    outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+    inputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
+    outputTensorInfo = armnn::TensorInfo(2, inputShape, ArmnnType);
     float qScale = 1.f / 256.f;
     int qOffset = 0;
     inputTensorInfo.SetQuantizationScale(qScale);