IVGCVSW-4306 Verify the TfLite Yolo v3

 * Added debug layer support for QSymmS8
 * QSymmS8 support for workloads

Signed-off-by: Keith Davis <keith.davis@arm.com>
Change-Id: I51af92fadc0be290629dd9198beab5abef9e351f
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 75db73c..f876c6b 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -41,7 +41,7 @@
 // Makes a workload for one the specified types based on the data type requirements of the tensorinfo.
 // Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
 template <typename Float16Workload, typename Float32Workload, typename Uint8Workload, typename Int32Workload,
-          typename BooleanWorkload, typename QueueDescriptorType, typename... Args>
+          typename BooleanWorkload, typename Int8Workload, typename QueueDescriptorType, typename... Args>
 std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descriptor,
                                               const WorkloadInfo& info,
                                               Args&&... args)
@@ -58,6 +58,8 @@
             return MakeWorkloadForType<Float32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
         case DataType::QAsymmU8:
             return MakeWorkloadForType<Uint8Workload>::Func(descriptor, info, std::forward<Args>(args)...);
+        case DataType::QSymmS8:
+            return MakeWorkloadForType<Int8Workload>::Func(descriptor, info, std::forward<Args>(args)...);
         case DataType::Signed32:
             return MakeWorkloadForType<Int32Workload>::Func(descriptor, info, std::forward<Args>(args)...);
         case DataType::Boolean:
@@ -72,14 +74,14 @@
 
 // Makes a workload for one the specified types based on the data type requirements of the tensorinfo.
-// Calling this method is the equivalent of calling the five typed MakeWorkload method with <FloatWorkload,
-// FloatWorkload, Uint8Workload, NullWorkload, NullWorkload>.
+// Calling this method is the equivalent of calling the six typed MakeWorkload method with <FloatWorkload,
+// FloatWorkload, Uint8Workload, NullWorkload, NullWorkload, NullWorkload>.
 // Specify type void as the WorkloadType for unsupported DataType/WorkloadType combos.
 template <typename FloatWorkload, typename Uint8Workload, typename QueueDescriptorType, typename... Args>
 std::unique_ptr<IWorkload> MakeWorkloadHelper(const QueueDescriptorType& descriptor,
                                               const WorkloadInfo& info,
                                               Args&&... args)
 {
-    return MakeWorkloadHelper<FloatWorkload, FloatWorkload, Uint8Workload, NullWorkload, NullWorkload>(
+    return MakeWorkloadHelper<FloatWorkload, FloatWorkload, Uint8Workload, NullWorkload, NullWorkload, NullWorkload>(
         descriptor,
         info,
         std::forward<Args>(args)...);
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 075884b..5057c8c 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -32,6 +32,8 @@
             return DataType::Float32;
         case DataType::QAsymmU8:
             return DataType::Signed32;
+        case DataType::QSymmS8:
+            return DataType::Signed32;
         case DataType::QSymmS16:
             return DataType::Signed32;
         default:
@@ -418,8 +420,8 @@
         const DataType inputDataType  = inputInfo.GetDataType();
         const DataType outputDataType = outputInfo.GetDataType();
 
-        const bool canHavePerAxisQuantization =
-            inputDataType == DataType::QAsymmU8 && inputDataType == outputDataType;
+        const bool canHavePerAxisQuantization = (inputDataType == DataType::QSymmS8 ||
+            inputDataType == DataType::QAsymmU8) && inputDataType == outputDataType;
 
         if (!canHavePerAxisQuantization)
         {
@@ -1038,6 +1040,7 @@
         DataType::Float32,
         DataType::QAsymmU8,
         DataType::QSymmS16,
+        DataType::QSymmS8,
         DataType::Float16
     };
 
@@ -1071,6 +1074,7 @@
     {
         DataType::Float32,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16,
         DataType::Float16
     };
@@ -1178,6 +1182,7 @@
         DataType::Float32,
         DataType::QAsymmU8,
         DataType::QSymmS16,
+        DataType::QSymmS8,
         DataType::Float16
     };
 
@@ -1377,6 +1382,7 @@
         DataType::Float16,
         DataType::Float32,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16
     };
 
@@ -1529,6 +1535,7 @@
         DataType::Float16,
         DataType::Signed32,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16
     };
 
@@ -1554,6 +1561,7 @@
         DataType::Float16,
         DataType::Signed32,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16
     };
 
@@ -2098,6 +2106,7 @@
         DataType::Float32,
         DataType::Signed32,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16
     };
 
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index b801f70..8410c30 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -147,9 +147,10 @@
 {
     bool supported = true;
 
-    std::array<DataType,4> supportedTypes = {
+    std::array<DataType,5> supportedTypes = {
         DataType::Float32,
         DataType::Float16,
+        DataType::QSymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -329,10 +330,11 @@
     ignore_unused(descriptor);
 
     bool supported = true;
-    std::array<DataType,4> supportedTypes =
+    std::array<DataType,5> supportedTypes =
     {
             DataType::Float32,
             DataType::Float16,
+            DataType::QSymmS8,
             DataType::QAsymmU8,
             DataType::QSymmS16
     };
@@ -355,11 +357,12 @@
 bool RefLayerSupport::IsConstantSupported(const TensorInfo& output,
                                           Optional<std::string&> reasonIfUnsupported) const
 {
-    std::array<DataType,4> supportedTypes =
+    std::array<DataType,5> supportedTypes =
     {
         DataType::Float32,
         DataType::Signed32,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16
     };
 
@@ -417,22 +420,23 @@
     bool supported = true;
 
     // Define supported types.
-    std::array<DataType,4> supportedTypes =
+    std::array<DataType,5> supportedTypes =
     {
         DataType::Float32,
         DataType::Float16,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
-                                  "Reference convolution2d: input is not a supported type.");
+                                  "Reference Convolution2d: input is not a supported type.");
 
     supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
-                                  "Reference convolution2d: output is not a supported type.");
+                                  "Reference Convolution2d: output is not a supported type.");
 
     supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
-                                  "Reference convolution2d: input and output types mismatched.");
+                                  "Reference Convolution2d: input and output types mismatched.");
 
     const DataType inputType = input.GetDataType();
     if (inputType == DataType::QAsymmU8)
@@ -447,15 +451,15 @@
         ARMNN_NO_DEPRECATE_WARN_END
 
         supported &= CheckSupportRule(TypeAnyOf(weights, supportedWeightTypes), reasonIfUnsupported,
-                                      "Reference convolution2d: weights type not supported for quantized input.");
+                                      "Reference Convolution2d: weights type not supported for quantized input.");
     }
     else
     {
         supported &= CheckSupportRule(TypeAnyOf(weights, supportedTypes), reasonIfUnsupported,
-                                      "Reference convolution2d: weights is not a supported type.");
+                                      "Reference Convolution2d: weights is not a supported type.");
 
         supported &= CheckSupportRule(TypesAreEqual(input, weights), reasonIfUnsupported,
-                                      "Reference convolution2d: input and weights types mismatched.");
+                                      "Reference Convolution2d: input and weights types mismatched.");
     }
 
     if (biases.has_value())
@@ -468,7 +472,7 @@
         };
 
         supported &= CheckSupportRule(TypeAnyOf(biases.value(), biasesSupportedTypes), reasonIfUnsupported,
-                                      "Reference convolution2d: biases is not a supported type.");
+                                      "Reference Convolution2d: biases is not a supported type.");
     }
     ignore_unused(descriptor);
 
@@ -481,23 +485,24 @@
 {
     bool supported = true;
 
-    std::array<DataType, 5> supportedTypes =
+    std::array<DataType, 6> supportedTypes =
     {
         DataType::Float16,
         DataType::Float32,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16,
         DataType::Signed32
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
-                                  "Reference debug: input type not supported");
+                                  "Reference for Debug layer: input type not supported");
 
     supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
-                                  "Reference debug: output type not supported");
+                                  "Reference for Debug layer: output type not supported");
 
     supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
-                                  "Reference debug: input and output types are mismatched");
+                                  "Reference for Debug layer: input and output types are mismatched");
 
     return supported;
 }
@@ -612,7 +617,7 @@
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
-                                  "Reference dequantize: input type not supported.");
+                                  "Reference for Dequantize layer: input type not supported.");
 
     supported &= CheckSupportRule(TypeNotPerAxisQuantized(input), reasonIfUnsupported,
-                                  "Reference dequantize: per-axis quantized input not support .");
+                                  "Reference for Dequantize layer: per-axis quantized input is not supported.");
@@ -623,10 +631,11 @@
     };
 
     supported &= CheckSupportRule(TypeAnyOf(output, supportedOutputTypes), reasonIfUnsupported,
-                                  "Reference dequantize: output type not supported.");
+                                  "Reference for Dequantize layer: output type not supported.");
 
     supported &= CheckSupportRule(ShapesAreSameTotalSize(input, output), reasonIfUnsupported,
-                                  "Reference dequantize: input and output shapes have different num total elements.");
+                                  "Reference for Dequantize layer: input/output shapes have different num total "
+                                  "elements.");
 
     return supported;
 }
@@ -1104,9 +1113,10 @@
 {
     bool supported = true;
 
-    std::array<DataType,4> supportedTypes = {
+    std::array<DataType,5> supportedTypes = {
         DataType::Float32,
         DataType::Float16,
+        DataType::QSymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1270,9 +1280,10 @@
 {
     bool supported = true;
 
-    std::array<DataType,4> supportedTypes = {
+    std::array<DataType,5> supportedTypes = {
         DataType::Float32,
         DataType::Float16,
+        DataType::QSymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
@@ -1428,8 +1439,9 @@
    bool supported = true;
 
     // Define supported input types.
-    std::array<DataType,1> supportedInputTypes = {
-        DataType::Float32,
+    std::array<DataType,2> supportedInputTypes = {
+        DataType::QSymmS8,
+        DataType::Float32
     };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedInputTypes), reasonIfUnsupported,
@@ -1458,12 +1470,13 @@
     ignore_unused(output);
     ignore_unused(descriptor);
     // Define supported output types.
-    std::array<DataType,5> supportedOutputTypes =
+    std::array<DataType,6> supportedOutputTypes =
     {
         DataType::Float32,
         DataType::Float16,
         DataType::Signed32,
         DataType::QAsymmU8,
+        DataType::QSymmS8,
         DataType::QSymmS16
     };
     return CheckSupportRule(TypeAnyOf(input, supportedOutputTypes), reasonIfUnsupported,
@@ -1502,10 +1515,11 @@
 {
     boost::ignore_unused(descriptor);
     bool supported = true;
-    std::array<DataType,4> supportedTypes =
+    std::array<DataType,5> supportedTypes =
     {
         DataType::Float32,
         DataType::Float16,
+        DataType::QSymmS8,
         DataType::QAsymmU8,
         DataType::QSymmS16
     };
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index e7a9c19..792bd7d 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -24,7 +24,8 @@
 std::unique_ptr<IWorkload> RefWorkloadFactory::MakeWorkload(const QueueDescriptorType& descriptor,
                                                             const WorkloadInfo& info) const
 {
-    return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload>(descriptor, info);
+    return MakeWorkloadHelper<NullWorkload, F32Workload, U8Workload, NullWorkload, NullWorkload, NullWorkload>
+           (descriptor, info);
 }
 
 template <DataType ArmnnType>
@@ -54,6 +55,11 @@
     return IsDataType<DataType::QSymmS16>(info);
 }
 
+bool IsQSymm8(const WorkloadInfo& info)
+{
+    return IsDataType<DataType::QSymmS8>(info);
+}
+
 RefWorkloadFactory::RefWorkloadFactory(const std::shared_ptr<RefMemoryManager>& memoryManager)
     : m_MemoryManager(memoryManager)
 {
@@ -185,6 +191,10 @@
     {
         return std::make_unique<RefDebugQSymm16Workload>(descriptor, info);
     }
+    if (IsQSymm8(info))
+    {
+        return std::make_unique<RefDebugQSymm8Workload>(descriptor, info);
+    }
     if (IsDataType<DataType::Signed32>(info))
     {
         return std::make_unique<RefDebugSigned32Workload>(descriptor, info);
@@ -419,7 +429,7 @@
         return std::make_unique<RefPermuteQSymm16Workload>(descriptor, info);
     }
     return MakeWorkloadHelper<RefPermuteFloat16Workload, RefPermuteFloat32Workload, RefPermuteQAsymm8Workload,
-        NullWorkload, NullWorkload>(descriptor, info);
+        NullWorkload, NullWorkload, NullWorkload>(descriptor, info);
 }
 
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreatePooling2d(const Pooling2dQueueDescriptor& descriptor,
diff --git a/src/backends/reference/workloads/Debug.cpp b/src/backends/reference/workloads/Debug.cpp
index 0f192f3..49e9e02 100644
--- a/src/backends/reference/workloads/Debug.cpp
+++ b/src/backends/reference/workloads/Debug.cpp
@@ -106,6 +106,12 @@
                              const std::string& layerName,
                              unsigned int slotIndex);
 
+template void Debug<int8_t>(const TensorInfo& inputInfo,
+                             const int8_t* inputData,
+                             LayerGuid guid,
+                             const std::string& layerName,
+                             unsigned int slotIndex);
+
 template void Debug<int16_t>(const TensorInfo& inputInfo,
                              const int16_t* inputData,
                              LayerGuid guid,
diff --git a/src/backends/reference/workloads/RefDebugWorkload.cpp b/src/backends/reference/workloads/RefDebugWorkload.cpp
index 59b836d..2f0b427 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.cpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.cpp
@@ -48,6 +48,7 @@
 template class RefDebugWorkload<DataType::Float32>;
 template class RefDebugWorkload<DataType::QAsymmU8>;
 template class RefDebugWorkload<DataType::QSymmS16>;
+template class RefDebugWorkload<DataType::QSymmS8>;
 template class RefDebugWorkload<DataType::Signed32>;
 
 } // namespace armnn
diff --git a/src/backends/reference/workloads/RefDebugWorkload.hpp b/src/backends/reference/workloads/RefDebugWorkload.hpp
index 58e4464..a15a863 100644
--- a/src/backends/reference/workloads/RefDebugWorkload.hpp
+++ b/src/backends/reference/workloads/RefDebugWorkload.hpp
@@ -37,10 +37,11 @@
     DebugCallbackFunction m_Callback;
 };
 
-using RefDebugFloat16Workload = RefDebugWorkload<DataType::Float16>;
-using RefDebugFloat32Workload = RefDebugWorkload<DataType::Float32>;
-using RefDebugQAsymm8Workload = RefDebugWorkload<DataType::QAsymmU8>;
-using RefDebugQSymm16Workload = RefDebugWorkload<DataType::QSymmS16>;
+using RefDebugFloat16Workload  = RefDebugWorkload<DataType::Float16>;
+using RefDebugFloat32Workload  = RefDebugWorkload<DataType::Float32>;
+using RefDebugQAsymm8Workload  = RefDebugWorkload<DataType::QAsymmU8>;
+using RefDebugQSymm16Workload  = RefDebugWorkload<DataType::QSymmS16>;
+using RefDebugQSymm8Workload   = RefDebugWorkload<DataType::QSymmS8>;
 using RefDebugSigned32Workload = RefDebugWorkload<DataType::Signed32>;
 
 } // namespace armnn
diff --git a/src/backends/reference/workloads/RefQuantizeWorkload.cpp b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
index 31534ab..ab2ee7f 100644
--- a/src/backends/reference/workloads/RefQuantizeWorkload.cpp
+++ b/src/backends/reference/workloads/RefQuantizeWorkload.cpp
@@ -50,7 +50,7 @@
         }
         case DataType::QSymmS8:
         {
-            QuantizeImpl<int8_t>(input, output, m_NumElements, m_Scale, m_Offset);
+            QuantizeImpl<int8_t>(input, output, m_NumElements, m_Scale, 0);
             break;
         }
         case DataType::QSymmS16: