IVGCVSW-5549 Fix failing Transpose Conv2d Uint8 test

 * Changing Uint8 tests to Int8 for Delegate Transpose Conv2d
 * Refactor of Quantization tests to be per backend

Signed-off-by: David Monahan <david.monahan@arm.com>
Change-Id: Ic1528e1dc339030c7f1eed9f3884e99b14f389e4
diff --git a/delegate/src/test/Convolution2dTest.cpp b/delegate/src/test/Convolution2dTest.cpp
index 6c50f8d..2ce2944 100644
--- a/delegate/src/test/Convolution2dTest.cpp
+++ b/delegate/src/test/Convolution2dTest.cpp
@@ -271,7 +271,7 @@
 
 } //End of TEST_SUITE("Convolution2dTest_GpuAcc")
 
-void TransposeConvUint8Test(std::vector<armnn::BackendId>& backends)
+void TransposeConvInt8Test(std::vector<armnn::BackendId>& backends)
 {
     // Set input data
     std::vector<int32_t> transposeTensorShape { 4 };
@@ -280,9 +280,9 @@
     std::vector<int32_t> outputShape { 1, 3, 3, 1 };
 
     std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
-    static std::vector<uint8_t> inputValues = { 1, 2, 3, 4 };
-    std::vector<uint8_t> filterValues = { 0, 1, 2, 4 };
-    std::vector<uint8_t> expectedOutputValues =
+    static std::vector<int8_t> inputValues = { 1, 2, 3, 4 };
+    std::vector<int8_t> filterValues = { 0, 1, 2, 4 };
+    std::vector<int8_t> expectedOutputValues =
         {
             0, 1,  2,
             2, 11, 12,
@@ -290,8 +290,8 @@
         };
 
     tflite::Padding padding = tflite::Padding_VALID;
-    TransposeConvTest<uint8_t>(backends,
-                             ::tflite::TensorType_UINT8,
+    TransposeConvTest<int8_t>(backends,
+                             ::tflite::TensorType_INT8,
                              1, // strideX
                              1, // strideY
                              padding,
@@ -347,10 +347,10 @@
     TransposeConvFp32Test(backends);
 }
 
-TEST_CASE ("TransposeConv_Uint8_Test")
+TEST_CASE ("TransposeConv_Int8_Test")
 {
     std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
-    TransposeConvUint8Test(backends);
+    TransposeConvInt8Test(backends);
 }
 
 } // End of  TEST_SUITE(TransposeConv_CpuRef_Test)
@@ -360,14 +360,14 @@
 
 TEST_CASE ("TransposeConv_Fp32_Test")
 {
-std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-TransposeConvFp32Test(backends);
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    TransposeConvFp32Test(backends);
 }
 
-TEST_CASE ("TransposeConv_Uint8_Test")
+TEST_CASE ("TransposeConv_Int8_Test")
 {
-std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
-TransposeConvUint8Test(backends);
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    TransposeConvInt8Test(backends);
 }
 
 } // End of  TEST_SUITE(TransposeConv_CpuAcc_Test)
@@ -377,14 +377,14 @@
 
 TEST_CASE ("TransposeConv_Fp32_Test")
 {
-std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-TransposeConvFp32Test(backends);
+    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    TransposeConvFp32Test(backends);
 }
 
-TEST_CASE ("TransposeConv_Uint8_Test")
+TEST_CASE ("TransposeConv_Int8_Test")
 {
-std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
-TransposeConvUint8Test(backends);
+    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    TransposeConvInt8Test(backends);
 }
 
 } // End of  TEST_SUITE(TransposeConv_GpuAcc_Test)
diff --git a/delegate/src/test/QuantizationTest.cpp b/delegate/src/test/QuantizationTest.cpp
index f743f24..fbc2903 100644
--- a/delegate/src/test/QuantizationTest.cpp
+++ b/delegate/src/test/QuantizationTest.cpp
@@ -279,142 +279,174 @@
                                       expectedOutputValues);
 }
 
-TEST_SUITE("QuantizationTests")
+TEST_SUITE("CpuRef_QuantizationTests")
+{
+
+TEST_CASE ("DEQUANTIZE_UINT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DequantizeUint8Test(backends);
+}
+
+
+TEST_CASE ("DEQUANTIZE_INT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DequantizeInt8Test(backends);
+}
+
+
+TEST_CASE ("DEQUANTIZE_INT16_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DequantizeInt16Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeFloat32Uint8Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeFloat32Int8Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_FLOAT32_INT16_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeFloat32Int16Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_INT16_INT16_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeInt16Int16Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_INT16_INT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeInt16Int8Test(backends);
+}
+
+
+
+TEST_CASE ("QUANTIZE_INT8_UINT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeInt8Uint8Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_UINT8_INT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeUint8Int8Test(backends);
+}
+
+}
+
+TEST_SUITE("CpuAcc_QuantizationTests")
+{
+
+// Dequantize Operator Tests
+TEST_CASE ("DEQUANTIZE_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    DequantizeUint8Test(backends);
+}
+
+TEST_CASE ("DEQUANTIZE_INT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    DequantizeInt8Test(backends);
+}
+
+TEST_CASE ("DEQUANTIZE_INT16_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    DequantizeInt16Test(backends);
+}
+
+// Quantize Operator Tests
+TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    QuantizeFloat32Uint8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    QuantizeFloat32Int8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_INT8_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    QuantizeInt8Uint8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_UINT8_INT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    QuantizeUint8Int8Test(backends);
+}
+
+}
+
+TEST_SUITE("GpuAcc_QuantizationTests")
 {
 
 // Dequantize Operator Tests
 TEST_CASE ("DEQUANTIZE_UINT8_GpuAcc_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    DequantizeUint8Test(backends);
-}
-
-TEST_CASE ("DEQUANTIZE_UINT8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
-                                               armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     DequantizeUint8Test(backends);
 }
 
 TEST_CASE ("DEQUANTIZE_INT8_GpuAcc_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    DequantizeInt8Test(backends);
-}
-
-TEST_CASE ("DEQUANTIZE_INT8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
-                                               armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     DequantizeInt8Test(backends);
 }
 
 TEST_CASE ("DEQUANTIZE_INT16_GpuAcc_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    DequantizeInt16Test(backends);
-}
-
-TEST_CASE ("DEQUANTIZE_INT16_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
-                                               armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     DequantizeInt16Test(backends);
 }
 
 // Quantize Operator Tests
 TEST_CASE ("QUANTIZE_FLOAT32_UINT8_GpuAcc_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    QuantizeFloat32Uint8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
-                                               armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     QuantizeFloat32Uint8Test(backends);
 }
 
 TEST_CASE ("QUANTIZE_FLOAT32_INT8_GpuAcc_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     QuantizeFloat32Int8Test(backends);
 }
 
-TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
-                                               armnn::Compute::CpuRef };
-    QuantizeFloat32Int8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_FLOAT32_INT16_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    QuantizeFloat32Int16Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_FLOAT32_INT16_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    QuantizeFloat32Int16Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_INT16_INT16_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    QuantizeInt16Int16Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_INT16_INT16_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    QuantizeInt16Int16Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_INT16_INT8_GpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    QuantizeInt16Int8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_INT16_INT8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-    QuantizeInt16Int8Test(backends);
-}
-
 TEST_CASE ("QUANTIZE_INT8_UINT8_GpuAcc_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    QuantizeInt8Uint8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_INT8_UINT8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
-                                               armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     QuantizeInt8Uint8Test(backends);
 }
 
 TEST_CASE ("QUANTIZE_UINT8_INT8_GpuAcc_Test")
 {
-    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
-                                               armnn::Compute::CpuRef };
-    QuantizeUint8Int8Test(backends);
-}
-
-TEST_CASE ("QUANTIZE_UINT8_INT8_CpuAcc_Test")
-{
-    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
-                                               armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     QuantizeUint8Int8Test(backends);
 }