IVGCVSW-4148 Extend reporting of quant multiplier > 1 as unsupported on ACL to per-axis case
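
* Added armcomputetensorutils::IsQuantMultiplierSupported() to check the
  requantization multiplier (inputScale * weightScale / outputScale)
  against 1.0f for both per-tensor and per-axis weight scales
* Replaced the duplicated inline multiplier checks in ClLayerSupport and
  NeonLayerSupport with the IS_QUANT_MULTIPLIER_SUPPORTED macro, which
  falls back to true when the ACL backend is not compiled in
* Removed the checks that rejected per-axis quantized weights outright,
  as the multiplier is now validated per channel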

Signed-off-by: Aron Virginas-Tar <Aron.Virginas-Tar@arm.com>
Change-Id: I66a8360b6d86e95325dee58927dcbe62ccf6ad58
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index e4fdb21..328a083 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -232,5 +232,33 @@
     }
 }
 
+bool IsQuantMultiplierSupported(const TensorInfo& input,
+                                const TensorInfo& output,
+                                const TensorInfo& weights)
+{
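+    // A requantization multiplier (inputScale * weightScale / outputScale) > 1.0f is currently not supported in ACL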
+    constexpr float maxQuantMultiplier = 1.0f;
+    if (weights.HasMultipleQuantizationScales())
+    {
+        for (float weightScale : weights.GetQuantizationScales())
+        {
+            if ((input.GetQuantizationScale() * weightScale) / output.GetQuantizationScale() > maxQuantMultiplier)
+            {
+                return false;
+            }
+        }
+    }
+    else
+    {
+        if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) /
+            output.GetQuantizationScale() > maxQuantMultiplier)
+        {
+            return false;
+        }
+    }
+
+    return true;
+}
+
 } // namespace armcomputetensorutils
 } // namespace armnn
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.hpp b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
index ef837d8..3fc6818 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.hpp
@@ -243,5 +243,9 @@
     return GetTensorShape(shape, 1U);
 }
 }
 
+bool IsQuantMultiplierSupported(const TensorInfo& input,
+                                const TensorInfo& output,
+                                const TensorInfo& weights);
+
 } // namespace armcomputetensorutils
 } // namespace armnn
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 9a5c383..49312d6 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -16,6 +16,7 @@
 
 #if defined(ARMCOMPUTECL_ENABLED)
 #include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
 #include "workloads/ClAbsWorkload.hpp"
 #include "workloads/ClAdditionWorkload.hpp"
 #include "workloads/ClActivationWorkload.hpp"
@@ -144,6 +145,14 @@
                                       std::forward<Params>(params)...);
 }
 
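+// IsQuantMultiplierSupported() is only available when ACL is enabled; otherwise treat the multiplier as supported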
+#if defined(ARMCOMPUTECL_ENABLED)
+#define IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights) \
+armcomputetensorutils::IsQuantMultiplierSupported(input, output, weights)
+#else
+#define IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights) true
+#endif
+
 } // anonymous namespace
 
 bool ClLayerSupport::IsAbsSupported(const TensorInfo& input,
@@ -324,8 +332,7 @@
                                               const Optional<TensorInfo>& biases,
                                               Optional<std::string&> reasonIfUnsupported) const
 {
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
     {
         return false;
     }
@@ -368,13 +375,7 @@
                                                      const Optional<TensorInfo>& biases,
                                                      Optional<std::string&> reasonIfUnsupported) const
 {
-    if (weights.HasPerAxisQuantization())
-    {
-        return false;
-    }
-
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
     {
         return false;
     }
@@ -395,8 +396,7 @@
                                                             const Optional<TensorInfo>& biases,
                                                             Optional<std::string&> reasonIfUnsupported) const
 {
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
     {
         return false;
     }
@@ -814,8 +814,7 @@
                                                        const Optional<TensorInfo>& biases,
                                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
     {
         return false;
     }
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 4474b12..20b6550 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -18,6 +18,7 @@
 
 #if defined(ARMCOMPUTENEON_ENABLED)
 #include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
 #include "workloads/NeonAbsWorkload.hpp"
 #include "workloads/NeonAdditionWorkload.hpp"
 #include "workloads/NeonActivationWorkload.hpp"
@@ -112,6 +113,14 @@
     return IsNeonBackendSupported(reasonIfUnsupported);
 #endif
 
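+// IsQuantMultiplierSupported() is only available when ACL is enabled; otherwise treat the multiplier as supported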
+#if defined(ARMCOMPUTENEON_ENABLED)
+#define IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights) \
+armcomputetensorutils::IsQuantMultiplierSupported(input, output, weights)
+#else
+#define IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights) true
+#endif
+
 } // anonymous namespace
 
 bool NeonLayerSupport::IsAbsSupported(const TensorInfo& input,
@@ -274,8 +282,7 @@
                                                 const Optional<TensorInfo>& biases,
                                                 Optional<std::string&> reasonIfUnsupported) const
 {
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
     {
         return false;
     }
@@ -308,13 +315,7 @@
                                                        const Optional<TensorInfo>& biases,
                                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    if (weights.HasPerAxisQuantization())
-    {
-        return false;
-    }
-
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
     {
         return false;
     }
@@ -345,8 +346,7 @@
                                                               const Optional<TensorInfo>& biases,
                                                               Optional<std::string&> reasonIfUnsupported) const
 {
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
     {
         return false;
     }
@@ -751,8 +751,7 @@
                                                          const Optional<TensorInfo>& biases,
                                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    // Multiplier > 1.0f currently not supported in ACL
-    if ((input.GetQuantizationScale() * weights.GetQuantizationScale()) / output.GetQuantizationScale() > 1.0f)
+    if (!IS_QUANT_MULTIPLIER_SUPPORTED(input, output, weights))
     {
         return false;
     }