COMPMID-2407: Add logistic and tanh activation support for QSYMM16 on NEON

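The output quantization chosen for QSYMM16 follows from the bounded
activation ranges: tanh produces values in (-1, 1) and logistic in
(0, 1), so a scale of 1.f / 32768.f with a zero offset represents both
across the full int16 span without saturation. A minimal sketch of that
symmetric 16-bit mapping (standalone illustration; the helper names
below are not the library API):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>

    // Illustrative helpers: QSYMM16 uses a scale and no offset.
    int16_t quantize_qsymm16(float x, float scale)
    {
        const long q = std::lround(x / scale);
        return static_cast<int16_t>(std::min<long>(32767, std::max<long>(-32768, q)));
    }

    float dequantize_qsymm16(int16_t q, float scale)
    {
        return static_cast<float>(q) * scale;
    }

    int main()
    {
        // Scale 1/32768 covers [-1, 32767/32768], enough for tanh and logistic outputs.
        const float   scale = 1.f / 32768.f;
        const float   y     = std::tanh(0.5f); // ~0.4621
        const int16_t q     = quantize_qsymm16(y, scale);
        std::cout << q << " -> " << dequantize_qsymm16(q, scale) << "\n";
        return 0;
    }
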
Change-Id: Ib89c9cfe12975e51d1710af736c73ce79e667363
Signed-off-by: giuros01 <giuseppe.rossini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1412
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/tests/validation/NEON/ActivationLayer.cpp b/tests/validation/NEON/ActivationLayer.cpp
index a5030b9..1174a05 100644
--- a/tests/validation/NEON/ActivationLayer.cpp
+++ b/tests/validation/NEON/ActivationLayer.cpp
@@ -104,6 +104,8 @@
 constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);
 #endif // defined(__aarch64__)
 
+constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1);
+
 /** CNN data types */
 const auto CNNDataTypes = framework::dataset::make("DataType",
 {
@@ -233,7 +235,6 @@
                                                                                                   ActivationLayerInfo::ActivationFunction::LOGISTIC,
                                                                                                   ActivationLayerInfo::ActivationFunction::TANH
                                                                                                 });
-
 const auto QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }), QuantizedActivationFunctionsDataset),
                                                 framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
 
@@ -256,6 +257,32 @@
     validate(Accessor(_target), _reference, tolerance_qasymm8);
 }
 TEST_SUITE_END() // QASYMM8
+
+/** Input data sets. */
+const auto Int16QuantizedActivationFunctionsDataset = framework::dataset::make("ActivationFunction", { ActivationLayerInfo::ActivationFunction::LOGISTIC,
+                                                                                                       ActivationLayerInfo::ActivationFunction::TANH
+                                                                                                     });
+const auto Int16QuantizedActivationDataset = combine(combine(framework::dataset::make("InPlace", { false }), Int16QuantizedActivationFunctionsDataset),
+                                                     framework::dataset::make("AlphaBeta", { 0.5f, 1.f }));
+
+TEST_SUITE(QSYMM16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallShapes(), Int16QuantizedActivationDataset),
+                                                                                                                        framework::dataset::make("DataType",
+                                                                                                                                DataType::QSYMM16)),
+                                                                                                                        framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0.f) })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qsymm16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEActivationLayerQuantizedFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::LargeShapes(), Int16QuantizedActivationDataset),
+                                                                                                                      framework::dataset::make("DataType",
+                                                                                                                              DataType::QSYMM16)),
+                                                                                                                      framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 32768.f, 0.f) })))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qsymm16);
+}
+TEST_SUITE_END() // QSYMM16
 TEST_SUITE_END() // Quantized
 
 TEST_SUITE_END() // ActivationLayer
diff --git a/tests/validation/fixtures/ActivationLayerFixture.h b/tests/validation/fixtures/ActivationLayerFixture.h
index 464382a..4aaf8e7 100644
--- a/tests/validation/fixtures/ActivationLayerFixture.h
+++ b/tests/validation/fixtures/ActivationLayerFixture.h
@@ -52,11 +52,11 @@
         ActivationLayerInfo info(function, alpha_beta, alpha_beta);
 
         _in_place                 = in_place;
-        _output_quantization_info = calculate_output_quantization_info(info, quantization_info);
-        _input_quantization_info  = in_place ? _output_quantization_info : quantization_info;
         _data_type                = data_type;
-        _function                 = function;
+        _output_quantization_info = calculate_output_quantization_info(_data_type, info, quantization_info);
+        _input_quantization_info  = in_place ? _output_quantization_info : quantization_info;
 
+        _function  = function;
         _target    = compute_target(shape, info);
         _reference = compute_reference(shape, info);
     }
@@ -73,7 +73,7 @@
             std::uniform_real_distribution<> distribution(min_bound, max_bound);
             library->fill(tensor, distribution, 0);
         }
-        else if(is_data_type_quantized_asymmetric(tensor.data_type()))
+        else if(is_data_type_quantized_asymmetric(tensor.data_type()) || (is_data_type_quantized_symmetric(tensor.data_type())))
         {
             library->fill_tensor_uniform(tensor, 0);
         }
@@ -141,14 +141,39 @@
     }
 
 private:
-    QuantizationInfo calculate_output_quantization_info(const ActivationLayerInfo &act_info, const QuantizationInfo &default_qinfo)
+    QuantizationInfo calculate_output_quantization_info(DataType dt, const ActivationLayerInfo &act_info, const QuantizationInfo &default_qinfo)
     {
+        auto qasymm8_max = float(std::numeric_limits<uint8_t>::max()) + 1.f;
+        auto qsymm16_max = float(std::numeric_limits<int16_t>::max()) + 1.f;
+
         switch(act_info.activation())
         {
             case ActivationLayerInfo::ActivationFunction::TANH:
-                return QuantizationInfo(1.f / 128.f, 128);
+                if(dt == DataType::QSYMM16)
+                {
+                    return QuantizationInfo(1.f / qsymm16_max, 0);
+                }
+                else if(dt == DataType::QASYMM8)
+                {
+                    return QuantizationInfo(1.f / (0.5 * qasymm8_max), int(0.5 * qasymm8_max));
+                }
+                else
+                {
+                    return default_qinfo;
+                }
             case ActivationLayerInfo::ActivationFunction::LOGISTIC:
-                return QuantizationInfo(1.f / 256.f, 0);
+                if(dt == DataType::QSYMM16)
+                {
+                    return QuantizationInfo(1.f / qsymm16_max, 0);
+                }
+                else if(dt == DataType::QASYMM8)
+                {
+                    return QuantizationInfo(1.f / qasymm8_max, 0);
+                }
+                else
+                {
+                    return default_qinfo;
+                }
             default:
                 return default_qinfo;
         }
diff --git a/tests/validation/reference/ActivationLayer.cpp b/tests/validation/reference/ActivationLayer.cpp
index f5e98aa..f573d12 100644
--- a/tests/validation/reference/ActivationLayer.cpp
+++ b/tests/validation/reference/ActivationLayer.cpp
@@ -65,6 +65,17 @@
     return dst;
 }
 
+template <>
+SimpleTensor<int16_t> activation_layer<int16_t>(const SimpleTensor<int16_t> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info)
+{
+    const QuantizationInfo dst_qinfo = oq_info.empty() ? src.quantization_info() : oq_info;
+
+    SimpleTensor<float>   src_tmp = convert_from_symmetric(src);
+    SimpleTensor<float>   dst_tmp = activation_layer<float>(src_tmp, info);
+    SimpleTensor<int16_t> dst     = convert_to_symmetric<int16_t>(dst_tmp, dst_qinfo);
+    return dst;
+}
+
 template SimpleTensor<float> activation_layer(const SimpleTensor<float> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
 template SimpleTensor<half> activation_layer(const SimpleTensor<half> &src, ActivationLayerInfo info, const QuantizationInfo &oq_info);
 } // namespace reference