IVGCVSW-4370 Deprecate DataType::QuantizedSymm8PerAxis

!android-nn-driver:2622
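
Callers that previously built per-axis quantized tensors with
DataType::QuantizedSymm8PerAxis should move to DataType::QSymmS8 and
attach the per-channel scales to the TensorInfo instead. A minimal
sketch of that migration, assuming the TensorInfo overload that takes a
vector of per-channel scales and a quantization dimension (the shape,
scale values and helper name below are illustrative only):

    #include <armnn/Tensor.hpp>
    #include <vector>

    // Illustrative helper: builds a per-channel quantized tensor info
    // without the deprecated DataType::QuantizedSymm8PerAxis.
    armnn::TensorInfo MakePerAxisQuantizedInfo()
    {
        const std::vector<float> scales = { 0.1f, 0.2f, 0.3f, 0.4f };

        // HasMultipleQuantizationScales() is then true, so
        // GetArmComputeDataType(..., multiScales) maps this tensor to
        // arm_compute::DataType::QSYMM8_PER_CHANNEL.
        return armnn::TensorInfo(armnn::TensorShape({ 4, 1, 1, 8 }),
                                 armnn::DataType::QSymmS8,
                                 scales,
                                 /*quantizationDim=*/0);
    }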

Change-Id: If99d3eff71ff66ba28af1e5af248299fe04511b9
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index 1cad92f..04202ad 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -13,7 +13,7 @@
 namespace armcomputetensorutils
 {
 
-arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType)
+arm_compute::DataType GetArmComputeDataType(armnn::DataType dataType, bool multiScales)
 {
     switch(dataType)
     {
@@ -28,9 +28,13 @@
         case armnn::DataType::QSymmS16:
             return arm_compute::DataType::QSYMM16;
         case armnn::DataType::QSymmS8:
-            return arm_compute::DataType::QSYMM8;
+        {
+            return multiScales ? arm_compute::DataType::QSYMM8_PER_CHANNEL : arm_compute::DataType::QSYMM8;
+        }
+        ARMNN_NO_DEPRECATE_WARN_BEGIN
         case armnn::DataType::QuantizedSymm8PerAxis:
             return arm_compute::DataType::QSYMM8_PER_CHANNEL;
+        ARMNN_NO_DEPRECATE_WARN_END
         case armnn::DataType::Signed32:
             return arm_compute::DataType::S32;
         default:
@@ -109,10 +113,11 @@
 // ARM Compute Tensor and CLTensor allocators.
 arm_compute::TensorInfo BuildArmComputeTensorInfo(const armnn::TensorInfo& tensorInfo)
 {
+    bool multiScales = tensorInfo.HasMultipleQuantizationScales();
     const arm_compute::TensorShape aclTensorShape = BuildArmComputeTensorShape(tensorInfo.GetShape());
-    const arm_compute::DataType aclDataType       = GetArmComputeDataType(tensorInfo.GetDataType());
+    const arm_compute::DataType aclDataType       = GetArmComputeDataType(tensorInfo.GetDataType(), multiScales);
 
-    const arm_compute::QuantizationInfo aclQuantizationInfo = tensorInfo.HasMultipleQuantizationScales() ?
+    const arm_compute::QuantizationInfo aclQuantizationInfo = multiScales ?
         arm_compute::QuantizationInfo(tensorInfo.GetQuantizationScales()) :
         arm_compute::QuantizationInfo(tensorInfo.GetQuantizationScale(), tensorInfo.GetQuantizationOffset());