COMPMID-2411: Add logistic and tanh activation support for QSYMM16 on CL

Change-Id: I8d72490b1cc58563ba7b94664135586bc40e6526
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1466
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
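
Note: QSYMM16 is 16-bit symmetric quantization, so tensors carry a scale but no
offset. For logistic and tanh the kernel works in the float domain: dequantize
with the input scale, apply the activation, then requantize with the fixed
output scale of 1/32768. A minimal host-side sketch of the same arithmetic
(illustrative only, not part of the patch; the helper name is hypothetical):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Mirrors activation_layer_quant_f32 for QSYMM16: no O1_VAL/O2_VAL offsets.
    int16_t qsymm16_tanh(int16_t q_in, float s1 /* input scale */)
    {
        const float x = static_cast<float>(q_in) * s1; // dequantize (symmetric: no offset)
        const float y = std::tanh(x);                  // activation in the float domain
        const float q = std::round(y * 32768.f);       // requantize with S2 = 1/32768
        return static_cast<int16_t>(std::max(-32768.f, std::min(32767.f, q))); // saturate, as CONVERT_SAT does
    }
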
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index db57bb9..36d8bed 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -148,8 +148,8 @@
     { "accumulate_squared", "accumulate.cl" },
     { "accumulate_weighted", "accumulate.cl" },
     { "activation_layer", "activation_layer.cl" },
-    { "activation_layer_qa8", "activation_layer_qa8.cl" },
-    { "activation_layer_qa8_f32", "activation_layer_qa8.cl" },
+    { "activation_layer_quant", "activation_layer_quant.cl" },
+    { "activation_layer_quant_f32", "activation_layer_quant.cl" },
     { "batch_to_space_nchw", "batch_to_space.cl" },
     { "batch_to_space_static_nchw", "batch_to_space.cl" },
     { "batch_to_space_nhwc", "batch_to_space.cl" },
@@ -576,8 +576,8 @@
 #include "./cl_kernels/activation_layer.clembed"
     },
     {
-        "activation_layer_qa8.cl",
-#include "./cl_kernels/activation_layer_qa8.clembed"
+        "activation_layer_quant.cl",
+#include "./cl_kernels/activation_layer_quant.clembed"
     },
     {
         "batch_to_space.cl",
diff --git a/src/core/CL/cl_kernels/activation_layer_qa8.cl b/src/core/CL/cl_kernels/activation_layer_quant.cl
similarity index 76%
rename from src/core/CL/cl_kernels/activation_layer_qa8.cl
rename to src/core/CL/cl_kernels/activation_layer_quant.cl
index 41f23ca..ebd3408 100644
--- a/src/core/CL/cl_kernels/activation_layer_qa8.cl
+++ b/src/core/CL/cl_kernels/activation_layer_quant.cl
@@ -21,9 +21,8 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "helpers.h"
+#include "activation_quant_helpers.h"
 
-#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
 #define VEC_FLOAT VEC_DATA_TYPE(float, VEC_SIZE)
 
 #if defined(FLOAT_DOMAIN)
@@ -31,15 +30,7 @@
 
 #include "activation_float_helpers.h"
 
-#if defined(O2_VAL) && defined(S2_VAL)
-#define OFFSET_OUT O2_VAL
-#define SCALE_OUT S2_VAL
-#else // defined(O2_VAL) && defined(S2_VAL)
-#define OFFSET_OUT O1_VAL
-#define SCALE_OUT S1_VAL
-#endif // defined(O2_VAL) && defined(S2_VAL)
-
-/** This performs an activation function on QASYMM8 inputs with float transformations.
+/** This performs an activation function on quantized inputs with float transformations.
  *
  * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
  *
@@ -47,10 +38,10 @@
  * @note Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
  * @note A, B variables required by some activation functions are set using -DA_VAL= and -DB_VAL= respectively.
  * @note Quantization scales of the input/output tensors are passed in with -DS1_VAL= and -DS2_VAL= respectively.
- * @note Quantization offsets of the input/output tensors are passed in with -DO1_VAL= and -DO2_VAL= respectively.
+ * @note Quantization offsets of the input/output tensors are passed in, for asymmetric data types only, with -DO1_VAL= and -DO2_VAL= respectively.
  * @note Quantized value of constant zero should be given as a preprocessor argument using -DCONST_0=value. e.g. -DCONST_0=128.
  *
- * @param[in]  input_ptr                            Pointer to the source image. Supported data types: QASYMM8
+ * @param[in]  input_ptr                            Pointer to the source image. Supported data types: QASYMM8/QSYMM16
  * @param[in]  input_stride_x                       Stride of the source image in X dimension (in bytes)
  * @param[in]  input_step_x                         input_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in]  input_stride_y                       Stride of the source image in Y dimension (in bytes)
@@ -67,7 +58,7 @@
  * @param[in]  output_step_z                        (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
  * @param[in]  output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
  */
-__kernel void activation_layer_qa8_f32(
+__kernel void activation_layer_quant_f32(
     TENSOR3D_DECLARATION(input)
 #ifndef IN_PLACE
     ,
@@ -87,10 +78,18 @@
     TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
 
     VEC_FLOAT data_flt = CONVERT(data, VEC_FLOAT);
-    data_flt           = round(data_flt - (float)O1_VAL) * ((float)S1_VAL);
-    data_flt           = ACTIVATION(ACT, float, data_flt, A_VAL, B_VAL);
+#if defined(O1_VAL)
+    data_flt = round(data_flt - (float)O1_VAL) * ((float)S1_VAL);
+#else  // defined(O1_VAL)
+    data_flt        = round(data_flt) * ((float)S1_VAL);
+#endif // defined(O1_VAL)
+    data_flt = ACTIVATION(ACT, float, data_flt, A_VAL, B_VAL);
 
-    data = CONVERT_SAT(round(data_flt / ((float)SCALE_OUT)) + (float)OFFSET_OUT, TYPE);
+#if defined(O2_VAL)
+    data = CONVERT_SAT(round(data_flt / ((float)S2_VAL)) + (float)O2_VAL, TYPE);
+#else  // defined(O2_VAL)
+    data            = CONVERT_SAT(round(data_flt / ((float)S2_VAL)), TYPE);
+#endif // defined(O2_VAL)
 
     // Store result
     VSTORE(VEC_SIZE)
@@ -100,45 +99,8 @@
 #else // defined(FLOAT_DOMAIN)
 // Activations performed in the quantized domain
 
-// RELU Activation
-inline TYPE relu_op(TYPE x)
-{
-    return max((TYPE)CONST_0, x);
-}
-// Bounded RELU Activation
-inline TYPE brelu_op(TYPE x)
-{
-    return min((TYPE)A_VAL, max(CONST_0, x));
-}
-// Lower Upper Bounded RELU Activation
-inline TYPE lu_brelu_op(TYPE x)
-{
-    return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL);
-}
-
-#define ACTIVATION_OP2(op, x) op##_op(x)
-#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x)
-
-#if defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL)
-#define PERFORM_ACTIVATION_QA8(act, data)                                                         \
-    ({                                                                                            \
-        data = ACTIVATION_OP(act, data);                                                          \
-        \
-        VEC_DATA_TYPE(float, VEC_SIZE)                                                            \
-        fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE));                                    \
-        \
-        fdata = round((fdata - (float)O1_VAL) * ((float)S1_VAL / (float)S2_VAL) + (float)O2_VAL); \
-        data  = CONVERT_SAT(fdata, VEC_DATA_TYPE(uchar, VEC_SIZE));                               \
-    })
-#else /* defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL) */
-#define PERFORM_ACTIVATION_QA8(act, data) \
-    ({                                    \
-        data = ACTIVATION_OP(act, data);  \
-    })
-#endif /* defined(O1_VAL) && defined(O2_VAL) && defined(S1_VAL) && defined(S2_VAL) */
-
 #if defined(ACT)
-/** This performs an activation function on QASYMM8 inputs.
+/** This performs an activation function on quantized inputs.
  *
  * @note In order to perform the activation function "in-place", the pre-processor -DIN_PLACE must be passed at compile time
  *
@@ -150,7 +112,7 @@
  * @note Quantization offsets of the input/output tensors are passed in with -DO1_VAL= and -DO2_VAL= respectively.
  * @note Quantized value of constant zero should be given as a preprocessor argument using -DCONST_0=value. e.g. -DCONST_0=128.
  *
- * @param[in]  input_ptr                            Pointer to the source image. Supported data types: QASYMM8
+ * @param[in]  input_ptr                            Pointer to the source image. Supported data types: QASYMM8/QSYMM16
  * @param[in]  input_stride_x                       Stride of the source image in X dimension (in bytes)
  * @param[in]  input_step_x                         input_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in]  input_stride_y                       Stride of the source image in Y dimension (in bytes)
@@ -167,7 +129,7 @@
  * @param[in]  output_step_z                        (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
  * @param[in]  output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination image
  */
-__kernel void activation_layer_qa8(
+__kernel void activation_layer_quant(
     TENSOR3D_DECLARATION(input)
 #ifndef IN_PLACE
     ,
@@ -186,7 +148,7 @@
     // Load data
     TYPE data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input.ptr);
 
-    data = PERFORM_ACTIVATION_QA8(ACT, data);
+    data = PERFORM_ACTIVATION_QUANT(ACT, data);
 
     // Store result
     VSTORE(VEC_SIZE)
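
Note: for reference, a QSYMM16 logistic build of this kernel would pass defines
along these lines (the values here are illustrative; the actual option strings
are assembled by CLActivationLayerKernel, further below):

    -DACT=logistic
    -DDATA_TYPE=short
    -DVEC_SIZE=16
    -DA_VAL=1.0f -DB_VAL=0.0f
    -DCONST_0=0                       (zero point of a symmetric tensor)
    -DS1_VAL=0.00048828125            (input scale, e.g. 1/2048)
    -DS2_VAL=0.000030517578125        (output scale, fixed to 1/32768)

Since no -DO1_VAL/-DO2_VAL are passed for symmetric tensors, the #else branches
above are taken and the offset terms drop out.
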
diff --git a/src/core/CL/cl_kernels/activation_quant_helpers.h b/src/core/CL/cl_kernels/activation_quant_helpers.h
new file mode 100644
index 0000000..402e7ac
--- /dev/null
+++ b/src/core/CL/cl_kernels/activation_quant_helpers.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "helpers.h"
+
+#define TYPE VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+
+#if defined(S1_VAL) && !defined(S2_VAL)
+#define S2_VAL S1_VAL
+#endif // defined(S1_VAL) && !defined(S2_VAL)
+#if defined(O1_VAL) && !defined(O2_VAL)
+#define O2_VAL O1_VAL
+#endif // defined(O1_VAL) && !defined(O2_VAL)
+
+// RELU Activation
+inline TYPE relu_op(TYPE x)
+{
+    return max((TYPE)CONST_0, x);
+}
+// Bounded RELU Activation
+inline TYPE brelu_op(TYPE x)
+{
+    return min((TYPE)A_VAL, max((TYPE)CONST_0, x));
+}
+// Lower Upper Bounded RELU Activation
+inline TYPE lu_brelu_op(TYPE x)
+{
+    return min(max(x, (TYPE)B_VAL), (TYPE)A_VAL);
+}
+
+#define ACTIVATION_OP2(op, x) op##_op(x)
+#define ACTIVATION_OP(op, x) ACTIVATION_OP2(op, x)
+
+#if defined(S1_VAL) && defined(S2_VAL)
+#if defined(O1_VAL) && defined(O2_VAL)
+#define PERFORM_ACTIVATION_QUANT(act, data)                                                       \
+    ({                                                                                            \
+        data = ACTIVATION_OP(act, data);                                                          \
+        \
+        VEC_DATA_TYPE(float, VEC_SIZE)                                                            \
+        fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE));                                    \
+        \
+        fdata = round((fdata - (float)O1_VAL) * ((float)S1_VAL / (float)S2_VAL) + (float)O2_VAL); \
+        data  = CONVERT_SAT(fdata, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE));                           \
+    })
+#else // defined(O1_VAL) && defined(O2_VAL)
+#define PERFORM_ACTIVATION_QUANT(act, data)                             \
+    ({                                                                  \
+        data = ACTIVATION_OP(act, data);                                \
+        \
+        VEC_DATA_TYPE(float, VEC_SIZE)                                  \
+        fdata = CONVERT(data, VEC_DATA_TYPE(float, VEC_SIZE));          \
+        \
+        fdata = round((fdata) * ((float)S1_VAL / (float)S2_VAL));       \
+        data  = CONVERT_SAT(fdata, VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)); \
+    })
+#endif /* defined(O1_VAL) && defined(O2_VAL) */
+#else  /* defined(S1_VAL) && defined(S2_VAL) */
+#define PERFORM_ACTIVATION_QUANT(act, data) \
+    ({                                      \
+        data = ACTIVATION_OP(act, data);    \
+    })
+#endif /* defined(S1_VAL) && defined(S2_VAL) */
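
Note: PERFORM_ACTIVATION_QUANT above clamps in the integer domain and only
rescales when the input and output quantization differ; with symmetric tensors
the offset terms drop out. An equivalent C++ sketch for RELU on QSYMM16
(illustrative only; the helper name is hypothetical):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    int16_t relu_qsymm16(int16_t q, float s1, float s2)
    {
        q = std::max<int16_t>(0, q);                                    // relu_op: CONST_0 == 0 for symmetric
        const float f = std::round(static_cast<float>(q) * (s1 / s2));  // rescale across quantizations
        return static_cast<int16_t>(std::max(-32768.f, std::min(32767.f, f))); // saturate, as CONVERT_SAT does
    }
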
diff --git a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
index 13568b0..8f2e441 100644
--- a/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
+++ b/src/core/CL/cl_kernels/depthwise_convolution_quantized.cl
@@ -31,8 +31,8 @@
 #ifndef VEC_SIZE
 #define VEC_SIZE 8
 #endif /* VEC_SIZE */
-#include "activation_layer_qa8.cl"
-#define ACTIVATION_FUNC(x) PERFORM_ACTIVATION_QA8(ACTIVATION_TYPE, x)
+#include "activation_layer_quant.cl"
+#define ACTIVATION_FUNC(x) PERFORM_ACTIVATION_QUANT(ACTIVATION_TYPE, x)
 #else /* defined(ACTIVATION_TYPE) && defined(CONST_0) */
 #define ACTIVATION_FUNC(x) (x)
 #endif /* defined(ACTIVATION_TYPE) && defined(CONST_0) */
diff --git a/src/core/CL/kernels/CLActivationLayerKernel.cpp b/src/core/CL/kernels/CLActivationLayerKernel.cpp
index 34d1298..97a0ff6 100644
--- a/src/core/CL/kernels/CLActivationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLActivationLayerKernel.cpp
@@ -46,9 +46,9 @@
 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_F16_UNSUPPORTED(input);
-    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8, DataType::QASYMM8, DataType::QSYMM16, DataType::F16, DataType::F32);
 
-    static std::set<ActivationLayerInfo::ActivationFunction> qs8_supported_activations =
+    static std::set<ActivationLayerInfo::ActivationFunction> quantized_supported_activations =
     {
         ActivationLayerInfo::ActivationFunction::RELU,
         ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU,
@@ -60,11 +60,15 @@
     const QuantizationInfo                       &oq_info   = (output != nullptr) ? output->quantization_info() : input->quantization_info();
     const ActivationLayerInfo::ActivationFunction f_act     = act_info.activation();
 
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized_asymmetric(data_type) && (qs8_supported_activations.count(f_act) == 0),
-                                    "For QASYMM8 only tanh, logistic, relu and lower/upper bounded relu are supported");
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(is_data_type_quantized(data_type) && (quantized_supported_activations.count(f_act) == 0),
+                                    "For quantized data types, only tanh, logistic, relu and lower/upper bounded relu are supported");
+
     ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::TANH) && (oq_info != QuantizationInfo(1.f / 128.f, 128)));
     ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_asymmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) && (oq_info != QuantizationInfo(1.f / 256.f, 0)));
 
+    ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_symmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::TANH) && (oq_info != QuantizationInfo(1.f / 32768.f, 0)));
+    ARM_COMPUTE_RETURN_ERROR_ON(is_data_type_quantized_symmetric(data_type) && (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) && (oq_info != QuantizationInfo(1.f / 32768.f, 0)));
+
     // Checks performed when output is configured
     if((output != nullptr) && (output->total_size() != 0))
     {
@@ -135,16 +139,22 @@
     int                b_const_int                       = 0;
 
     const ActivationLayerInfo::ActivationFunction f_act                       = act_info.activation();
-    const bool                                    is_quantized_asymmetric     = is_data_type_quantized_asymmetric(dt);
+    const bool                                    is_quantized                = is_data_type_quantized(dt);
     const bool                                    perform_activation_in_float = (f_act == ActivationLayerInfo::ActivationFunction::LOGISTIC) || (f_act == ActivationLayerInfo::ActivationFunction::TANH);
 
     // Create quantized version of constants a, b if needed
-    if(is_quantized_asymmetric)
+    if(dt == DataType::QASYMM8)
     {
         const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
         a_const_int                           = quantize_qasymm8(a_const, iq_info);
         b_const_int                           = quantize_qasymm8(b_const, iq_info);
     }
+    else if(dt == DataType::QSYMM16)
+    {
+        const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
+        a_const_int                           = quantize_qsymm16(a_const, iq_info);
+        b_const_int                           = quantize_qsymm16(b_const, iq_info);
+    }
 
     // Set build options
     CLBuildOptions build_opts;
@@ -155,7 +165,7 @@
     build_opts.add_option(("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration)));
 
     // Set A, B constants in build options
-    if(is_quantized_asymmetric && !perform_activation_in_float)
+    if(is_quantized && !perform_activation_in_float)
     {
         build_opts.add_option(("-DA_VAL=" + support::cpp11::to_string(a_const_int)));
         build_opts.add_option(("-DB_VAL=" + support::cpp11::to_string(b_const_int)));
@@ -167,14 +177,14 @@
     }
 
     // Set quantization info build options
-    if(is_quantized_asymmetric)
+    if(is_quantized)
     {
         const UniformQuantizationInfo iq_info = input->info()->quantization_info().uniform();
 
         // Quantized value of 0 corresponds to the offset o1
-        build_opts.add_option(("-DCONST_0=" + support::cpp11::to_string(iq_info.offset)));
+        build_opts.add_option(("-DCONST_0=" + (is_data_type_quantized_asymmetric(dt) ? support::cpp11::to_string(iq_info.offset) : "0")));
         build_opts.add_option(("-DS1_VAL=" + float_to_string_with_full_precision(iq_info.scale)));
-        build_opts.add_option(("-DO1_VAL=" + support::cpp11::to_string(iq_info.offset)));
+        build_opts.add_option_if(is_data_type_quantized_asymmetric(dt), "-DO1_VAL=" + support::cpp11::to_string(iq_info.offset));
 
         // Set scale and offset of the input and output if they have different quantization info
         if(output != nullptr)
@@ -184,16 +194,16 @@
             if(iq_info != oq_info)
             {
                 build_opts.add_option(("-DS2_VAL=" + float_to_string_with_full_precision(oq_info.scale)));
-                build_opts.add_option(("-DO2_VAL=" + support::cpp11::to_string(oq_info.offset)));
+                build_opts.add_option_if(is_data_type_quantized_asymmetric(dt), "-DO2_VAL=" + support::cpp11::to_string(oq_info.offset));
             }
         }
     }
 
     // Create kernel
     std::string kernel_name = std::string("activation_layer");
-    if(is_quantized_asymmetric)
+    if(is_quantized)
     {
-        kernel_name += perform_activation_in_float ? std::string("_qa8_f32") : std::string("_qa8");
+        kernel_name += perform_activation_in_float ? std::string("_quant_f32") : std::string("_quant");
     }
     _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel(kernel_name, build_opts.options()));
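
Note: a minimal usage sketch of the new path, assuming the standard
CLActivationLayer runtime wrapper; the tensor shape and input scale are
arbitrary. As enforced by validate_arguments above, QSYMM16 tanh and logistic
require the output quantization to be exactly (1/32768, 0):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLActivationLayer.h"

    using namespace arm_compute;

    void run_qsymm16_tanh()
    {
        CLScheduler::get().default_init();

        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 2048.f, 0)));
        dst.allocator()->init(TensorInfo(TensorShape(16U, 16U), 1, DataType::QSYMM16, QuantizationInfo(1.f / 32768.f, 0)));

        CLActivationLayer act;
        act.configure(&src, &dst, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::TANH, 1.f, 1.f));

        src.allocator()->allocate();
        dst.allocator()->allocate();

        act.run();
        CLScheduler::get().sync(); // block until the kernel has finished
    }
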