Add tolerance for quantized activations computed in float

Some activation functions require complex mathematical operations and are
therefore implemented by dequantizing to float, performing the activation
in the float domain and requantizing back. In such cases, the results may
differ slightly between the reference and the optimized code.
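As a rough illustration, a minimal standalone sketch of this round trip
for the LOGISTIC function on QASYMM8 (not the library's actual kernel;
the helper name and parameters are hypothetical) could look like:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    uint8_t logistic_qasymm8(uint8_t in, float in_scale, int32_t in_offset,
                             float out_scale, int32_t out_offset)
    {
        // Dequantize the input to float.
        const float x = (static_cast<int32_t>(in) - in_offset) * in_scale;
        // Perform the activation in the float domain.
        const float y = 1.f / (1.f + std::exp(-x));
        // Requantize back to QASYMM8; rounding differences between the
        // reference and the optimized paths can shift the result by 1.
        const int32_t q = static_cast<int32_t>(std::lround(y / out_scale)) + out_offset;
        return static_cast<uint8_t>(std::min(std::max(q, 0), 255));
    }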

In fact, when running validation through Valgrind we observe a difference
of 1 in the results; therefore, an absolute tolerance of 1 is added to
the tests for these activation functions.

Resolves: COMPMID-4067

Change-Id: Ic2eca5616371b0a324a246d40b515ddc9f576e61
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4841
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index 45e09fb..456b96d 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -136,12 +136,27 @@
     }
 }
 
-/** Tolerance for quantized asymmetric operations */
-#if(!defined(__aarch64__) || defined(__ARM_FEATURE_SVE2))
-constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
-#else  // !(!defined(__aarch64__) || defined(__ARM_FEATURE_SVE2))
-constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(0);
-#endif // defined(__aarch64__)
+/** Define absolute tolerance of the activation layer for qasymm8.
+ *
+ * @param[in] activation The activation function used.
+ *
+ * @return Absolute tolerance depending on the activation function.
+ */
+AbsoluteTolerance<uint8_t> tolerance_qasymm8(ActivationLayerInfo::ActivationFunction activation)
+{
+    switch(activation)
+    {
+        case ActivationLayerInfo::ActivationFunction::LOGISTIC:
+        case ActivationLayerInfo::ActivationFunction::SQRT:
+        case ActivationLayerInfo::ActivationFunction::TANH:
+        case ActivationLayerInfo::ActivationFunction::HARD_SWISH:
+        case ActivationLayerInfo::ActivationFunction::SOFT_RELU:
+        case ActivationLayerInfo::ActivationFunction::LEAKY_RELU:
+            return AbsoluteTolerance<uint8_t>(1);
+        default:
+            return AbsoluteTolerance<uint8_t>(0);
+    }
+}
 
 constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1);
 
@@ -285,7 +300,7 @@
                                                                                                                   framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.1f, 128.0f) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_qasymm8);
+    validate(Accessor(_target), _reference, tolerance_qasymm8(_function));
 }
 TEST_SUITE_END() // QASYMM8
 
@@ -296,7 +311,7 @@
                                                                                                                  framework::dataset::make("QuantizationInfo", { QuantizationInfo(0.5f, 10.0f) })))
 {
     // Validate output
-    validate(Accessor(_target), _reference, tolerance_qasymm8);
+    validate(Accessor(_target), _reference, tolerance_qasymm8(_function));
 }
 TEST_SUITE_END() // QASYMM8_SIGNED