IVGCVSW-4641 Investigate HAL 1.3 VTS Failures

* Add QASYMM8_SIGNED data type support to NeonTensorHandle
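
For context, the sketch below shows how an application might declare a signed
asymmetric quantized tensor through the public Arm NN API; with this patch such
data can be copied in and out of the Neon backend via NeonTensorHandle's new
int8_t paths. The shape, quantization scale, offset, and helper name are
illustrative assumptions and are not part of this change.

    // Illustrative only (not part of this patch): declare a QAsymmS8 tensor
    // whose backing memory the Neon backend can now import and export.
    #include <armnn/Tensor.hpp>
    #include <armnn/Types.hpp>
    #include <cstdint>
    #include <vector>

    armnn::ConstTensor MakeExampleInput(std::vector<int8_t>& storage)
    {
        // Shape, scale and offset are assumed values for illustration.
        armnn::TensorInfo info({1, 8}, armnn::DataType::QAsymmS8,
                               0.05f /*scale*/, 10 /*offset*/);
        storage.resize(info.GetNumElements());
        return armnn::ConstTensor(info, storage.data());
    }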

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Iae34f7d67de83642606ccd8c61a1b72df7f2bb3a
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index 7a75f9c..00ebc9c 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -264,6 +264,7 @@
             return arm_compute::PixelValue(static_cast<uint8_t>(pixelValue));
         case arm_compute::DataType::QSYMM16:
             return arm_compute::PixelValue(static_cast<int16_t>(pixelValue));
+        case arm_compute::DataType::QASYMM8_SIGNED:
         case arm_compute::DataType::QSYMM8_PER_CHANNEL:
             return arm_compute::PixelValue(static_cast<int8_t>(pixelValue));
         default:
diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp
index fb2c2b5..f251034 100644
--- a/src/backends/neon/NeonTensorHandle.hpp
+++ b/src/backends/neon/NeonTensorHandle.hpp
@@ -179,6 +179,10 @@
                 armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                  static_cast<uint8_t*>(memory));
                 break;
+            case arm_compute::DataType::QASYMM8_SIGNED:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<int8_t*>(memory));
+                break;
             case arm_compute::DataType::BFLOAT16:
                 armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                  static_cast<armnn::BFloat16*>(memory));
@@ -217,6 +221,10 @@
                 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
                                                                  this->GetTensor());
                 break;
+            case arm_compute::DataType::QASYMM8_SIGNED:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
+                                                                 this->GetTensor());
+                break;
             case arm_compute::DataType::BFLOAT16:
                 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const armnn::BFloat16*>(memory),
                                                                  this->GetTensor());
@@ -305,6 +313,10 @@
                 armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
                                                                  static_cast<uint8_t*>(memory));
                 break;
+            case arm_compute::DataType::QASYMM8_SIGNED:
+                armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
+                                                                 static_cast<int8_t*>(memory));
+                break;
             case arm_compute::DataType::S16:
             case arm_compute::DataType::QSYMM16:
                 armcomputetensorutils::CopyArmComputeITensorData(this->GetTensor(),
@@ -335,6 +347,10 @@
                 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const uint8_t*>(memory),
                                                                  this->GetTensor());
                 break;
+            case arm_compute::DataType::QASYMM8_SIGNED:
+                armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int8_t*>(memory),
+                                                                 this->GetTensor());
+                break;
             case arm_compute::DataType::S16:
             case arm_compute::DataType::QSYMM16:
                 armcomputetensorutils::CopyArmComputeITensorData(static_cast<const int16_t*>(memory),
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index c3c9d3d..860a835 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -52,6 +52,7 @@
         case DataType::QuantizedSymm8PerAxis:
             ARMNN_FALLTHROUGH;
         case DataType::QSymmS8:
+        case DataType::QAsymmS8:
             CopyArmComputeTensorData(tensor, handle->GetConstTensor<int8_t>());
             break;
         ARMNN_NO_DEPRECATE_WARN_END
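
For reference, every copy path touched above follows the same pattern: switch on
the Arm Compute data type, cast the raw memory pointer to the matching element
type, and delegate to CopyArmComputeITensorData. The standalone helper below is
a simplified sketch of that dispatch, not Arm NN code; the function name
ElementSizeOf and the idea of reducing the switch to an element width are
assumptions made for illustration.

    // Standalone sketch of the dispatch pattern extended by this patch:
    // map the Arm Compute data type to the width of the element that the
    // raw memory pointer should be cast to before copying.
    #include <arm_compute/core/Types.h>
    #include <cstddef>

    inline std::size_t ElementSizeOf(arm_compute::DataType type)
    {
        using arm_compute::DataType;
        switch (type)
        {
            case DataType::QASYMM8:
            case DataType::QASYMM8_SIGNED:     // the case this patch adds
            case DataType::QSYMM8_PER_CHANNEL:
                return 1;                      // copied as uint8_t / int8_t
            case DataType::S16:
            case DataType::QSYMM16:
            case DataType::BFLOAT16:
            case DataType::F16:
                return 2;
            case DataType::F32:
                return 4;
            default:
                return 0;                      // unhandled in this sketch
        }
    }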