IVGCVSW-7752 DTS: Fix QuantizePerChannel tests

* Added validation that scale(s) are set for all quantized types
* Added Encoder for the per-axis symmetric INT16 quantized type (QSymmS16)
* Added an error for per-axis asymmetric quantized types, which are not supported

Signed-off-by: John Mcloughlin <john.mcloughlin@arm.com>
Change-Id: I433519ccacd71219a92bde2b81955d6abf9219c5
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index d2b14cd..f18c6bf 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -799,32 +799,51 @@
     for (unsigned int i = 0; i < numOutputs; i++)
     {
         OutputSlot& outputSlot = layer->GetOutputSlot(i);
         TensorInfo info = outputSlot.GetTensorInfo();
-        if (DataType::QAsymmU8 == info.GetDataType())
-        {
-            if (0.f == info.GetQuantizationScale())
-            {
-                noErrors = false;
-                std::stringstream ss;
-                ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
-                   << " (" << layer->GetNameStr() << ") is of type"
-                   << " Quantized 8 bit but its scale parameter has not been set";
-                ReportError(ss.str(), errMessages);
-            }
-            // Softmax under QuantisedAsymm8 must always be scale (1.0f/256.0f) and offset 0
-            if ((info.GetQuantizationScale() != (1.0f / 256.0f) ||
-                 info.GetQuantizationOffset() != 0) &&
-                 layer->GetType() == armnn::LayerType::Softmax)
-            {
-                std::stringstream ss;
-                ss << "Quantization parameters for Softmax layer (Scale: " <<
-                info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
-                ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
-                ARMNN_LOG(warning) << ss.str();
-                info.SetQuantizationScale((1.0f /256.0f));
-                info.SetQuantizationOffset(0);
-                outputSlot.SetTensorInfo(info);
-            }
+        auto quantizationDataType = info.GetDataType();
+        auto quantizationScales = info.GetQuantizationScales();
+        // For any quantized tensor, ensure that its scale(s) have been set
+        switch(quantizationDataType)
+        {
+            case DataType::QAsymmU8:
+            case DataType::QSymmS16:
+            case DataType::QSymmS8:
+            case DataType::QAsymmS8:
+                if ((quantizationDataType == DataType::QAsymmU8 || quantizationDataType == DataType::QAsymmS8)
+                    && info.HasPerAxisQuantization())
+                {
+                    throw InvalidArgumentException("Per Axis Quantization is not supported for "
+                                                   "Asymmetric Quantization data types.");
+                }
+                if ((!info.HasPerAxisQuantization() && info.GetQuantizationScale() == 0.f)
+                    || (info.HasPerAxisQuantization() && (quantizationScales.end() !=
+                    std::find(quantizationScales.begin(), quantizationScales.end(), 0.f))))
+                {
+                    noErrors = false;
+                    std::stringstream ss;
+                    ss << "output " << i << " of layer " << GetLayerTypeAsCString(layer->GetType())
+                       << " (" << layer->GetNameStr() << ") is of a quantized type"
+                       << " but its scale parameter has not been set";
+                    ReportError(ss.str(), errMessages);
+                }
+                // Softmax under QAsymmU8 must always have scale (1.0f/256.0f) and offset 0
+                if (!info.HasPerAxisQuantization() && quantizationDataType == DataType::QAsymmU8 &&
+                    (info.GetQuantizationScale() != (1.0f / 256.0f) ||
+                     info.GetQuantizationOffset() != 0) &&
+                    layer->GetType() == armnn::LayerType::Softmax)
+                {
+                    std::stringstream ss;
+                    ss << "Quantization parameters for Softmax layer (Scale: " <<
+                       info.GetQuantizationScale() << " and Offset: " << info.GetQuantizationOffset() <<
+                       ") are incorrect and have been updated to Scale: 0.00390625 and Offset: 0";
+                    ARMNN_LOG(warning) << ss.str();
+                    info.SetQuantizationScale((1.0f / 256.0f));
+                    info.SetQuantizationOffset(0);
+                    outputSlot.SetTensorInfo(info);
+                }
+                break;
+            default:
+                break;
         }
     }
     return noErrors;
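
For illustration, the scale check above can be distilled into a standalone predicate. The sketch below is not part of the patch; HasUnsetScale is a hypothetical helper name, and it assumes only the existing armnn::TensorInfo API from armnn/Tensor.hpp:

    #include <algorithm>
    #include <vector>
    #include <armnn/Tensor.hpp>

    // Hypothetical helper mirroring the zero-scale validation above.
    bool HasUnsetScale(const armnn::TensorInfo& info)
    {
        if (!info.HasPerAxisQuantization())
        {
            // Per-tensor quantization: a single scale of 0.0f means it was never set.
            return info.GetQuantizationScale() == 0.f;
        }
        // Per-axis quantization: any 0.0f entry in the scale vector is an error.
        const std::vector<float> scales = info.GetQuantizationScales();
        return std::find(scales.begin(), scales.end(), 0.f) != scales.end();
    }
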
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 2d27951..1665c1f 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -896,4 +896,33 @@
     std::vector<float> m_Scales;
 };
 
+// Encoder for per-axis symmetric 16-bit quantization (QSymmS16).
+// Symmetric quantization always uses an offset of 0; only the scale varies per axis.
+class QSymm16PerAxisEncoder : public PerAxisIterator<int16_t, Encoder<float>>
+{
+public:
+    QSymm16PerAxisEncoder(int16_t* data, const std::vector<float>& scale,
+                          unsigned int axisFactor, unsigned int axisDimensionality)
+        : PerAxisIterator(data, axisFactor, axisDimensionality), m_Scale(scale) {}
+
+    void Set(float right)
+    {
+        *m_Iterator = armnn::Quantize<int16_t>(right, m_Scale[m_AxisIndex], 0);
+    }
+
+    float Get() const
+    {
+        return armnn::Dequantize(*m_Iterator, m_Scale[m_AxisIndex], 0);
+    }
+
+    // Get scale of the current value
+    float GetScale() const
+    {
+        return m_Scale[m_AxisIndex];
+    }
+
+private:
+    std::vector<float> m_Scale;
+};
+
 } // namespace armnn
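
What the new encoder computes per element: symmetric 16-bit quantization always uses offset 0, and each element is quantized with the scale of the channel it belongs to. Below is a minimal self-contained model of the Set/Get round trip; QuantizeSymm16 and DequantizeSymm16 are simplified stand-ins for armnn::Quantize<int16_t> and armnn::Dequantize, not the library functions themselves:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Simplified stand-in for armnn::Quantize<int16_t> with offset 0:
    // divide by the scale, round to nearest, then clamp to the int16_t range.
    int16_t QuantizeSymm16(float value, float scale)
    {
        float rounded = std::round(value / scale);
        rounded = std::min(std::max(rounded, -32768.0f), 32767.0f);
        return static_cast<int16_t>(rounded);
    }

    // Simplified stand-in for armnn::Dequantize with offset 0.
    float DequantizeSymm16(int16_t value, float scale)
    {
        return static_cast<float>(value) * scale;
    }

    // Per axis, element i of channel c uses scales[c]:
    //   int16_t q = QuantizeSymm16(input[i], scales[c]);
    //   float   r = DequantizeSymm16(q, scales[c]);
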
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index d6d6114..8a70237 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -56,10 +56,24 @@
         }
         case armnn::DataType::QSymmS16:
         {
-            return std::make_unique<QSymm16Encoder>(
-                static_cast<int16_t*>(data),
-                info.GetQuantizationScale(),
-                info.GetQuantizationOffset());
+            if (info.HasPerAxisQuantization())
+            {
+                unsigned int axis = info.GetQuantizationDim().value();
+                auto axisDimensionality = info.GetShape()[axis];
+                std::pair<unsigned int, std::vector<float>> params = armnnUtils::GetPerAxisParams(info);
+                return std::make_unique<QSymm16PerAxisEncoder>(
+                        static_cast<int16_t*>(data),
+                        params.second,
+                        params.first,
+                        axisDimensionality);
+            }
+            else
+            {
+                return std::make_unique<QSymm16Encoder>(
+                        static_cast<int16_t*>(data),
+                        info.GetQuantizationScale(),
+                        info.GetQuantizationOffset());
+            }
         }
         case armnn::DataType::Signed32:
         {
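
A sketch of how the new QSymmS16 per-axis branch would be reached. It assumes the per-axis armnn::TensorInfo constructor and armnnUtils::GetPerAxisParams, both of which already exist in the library; the shape and scale values are illustrative only:

    #include <vector>
    #include <armnn/Tensor.hpp>
    #include <armnnUtils/TensorUtils.hpp>

    // A 2x4 tensor quantized along axis 0, i.e. one scale per row.
    armnn::TensorInfo info(armnn::TensorShape({2, 4}),
                           armnn::DataType::QSymmS16,
                           std::vector<float>{0.1f, 0.2f}, // per-axis scales
                           0);                             // quantization dim

    // info.HasPerAxisQuantization() is now true, so MakeEncoder<float>(info, data)
    // takes the new branch: GetPerAxisParams(info) supplies the axis factor and
    // scale vector, and GetShape()[axis] (here 2) gives the axis dimensionality
    // passed to QSymm16PerAxisEncoder.
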