COMPMID-423: Port CLSoftmaxLayer to QS8

Change-Id: I759b7585656d018d7c864425118cd3ec2ca9b0eb
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/78908
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Moritz Pflanzer <moritz.pflanzer@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
diff --git a/src/core/CL/CLHelpers.cpp b/src/core/CL/CLHelpers.cpp
index 21b72dd..4b5bbbb 100644
--- a/src/core/CL/CLHelpers.cpp
+++ b/src/core/CL/CLHelpers.cpp
@@ -72,6 +72,8 @@
     {
         case DataType::U8:
             return "uchar";
+        case DataType::QS8:
+            return "qs8";
         case DataType::S8:
             return "char";
         case DataType::U16:
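
The new case makes get_cl_type_from_data_type() return "qs8" for QS8 tensors, i.e. the alias defined in the new fixed_point.h header. A standalone sketch of how this string typically ends up in the kernel build options, mirroring the build-option changes made to CLSoftmaxLayerKernel.cpp later in this patch (the literal values below are stand-ins for the real helper calls, not library API):

    #include <cstdio>
    #include <set>
    #include <string>

    int main()
    {
        // Stand-ins for get_cl_type_from_data_type(DataType::QS8) and
        // input->info()->fixed_point_position(); illustrative values only.
        const std::string cl_type              = "qs8";
        const int         fixed_point_position = 4;

        std::set<std::string> build_opts;
        build_opts.emplace("-DDATA_TYPE=" + cl_type);
        build_opts.emplace("-DFIXED_POINT_POSITION=" + std::to_string(fixed_point_position));

        for(const std::string &opt : build_opts)
        {
            std::printf("%s\n", opt.c_str()); // prints -DDATA_TYPE=qs8 and -DFIXED_POINT_POSITION=4
        }
        return 0;
    }
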
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 6c64265..081edac 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -353,6 +353,10 @@
 #include "./cl_kernels/fill_border.clembed"
     },
     {
+        "fixed_point.h",
+#include "./cl_kernels/fixed_point.hembed"
+    },
+    {
         "gaussian_pyramid.cl",
 #include "./cl_kernels/gaussian_pyramid.clembed"
     },
diff --git a/src/core/CL/cl_kernels/fixed_point.h b/src/core/CL/cl_kernels/fixed_point.h
new file mode 100644
index 0000000..2c100c2
--- /dev/null
+++ b/src/core/CL/cl_kernels/fixed_point.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_FIXED_POINT_H
+#define ARM_COMPUTE_FIXED_POINT_H
+
+#define TYPE_ALIAS(type, alias)  \
+    typedef type alias;          \
+    typedef type alias##x##1;    \
+    typedef type##2 alias##x##2; \
+    typedef type##3 alias##x##3; \
+    typedef type##4 alias##x##4; \
+    typedef type##8 alias##x##8; \
+    typedef type##16 alias##x##16;
+
+TYPE_ALIAS(char, qs8)
+TYPE_ALIAS(short, qs16)
+
+#define qs8_MIN ((char)CHAR_MIN)
+#define qs8_MAX ((char)CHAR_MAX)
+#define qs16_MIN ((short)SHRT_MIN)
+#define qs16_MAX ((short)SHRT_MAX)
+
+#define qu8_MIN ((uchar)0)
+#define qu8_MAX ((uchar)UCHAR_MAX)
+#define qu16_MIN ((ushort)0)
+#define qu16_MAX ((ushort)USHRT_MAX)
+
+#define qs8_TYPE char
+#define qs8x1_TYPE char
+#define qs8x2_TYPE char2
+#define qs8x4_TYPE char4
+#define qs8x8_TYPE char8
+#define qs8x16_TYPE char16
+
+#define qs16_TYPE short
+#define qs16x1_TYPE short
+#define qs16x2_TYPE short2
+#define qs16x4_TYPE short4
+#define qs16x8_TYPE short8
+#define qs16x16_TYPE short16
+
+#undef VEC_DATA_TYPE_STR
+#undef VEC_DATA_TYPE
+#undef CONVERT_STR
+#undef CONVERT
+#undef CONVERT_SAT_STR
+#undef CONVERT_SAT
+
+#define VEC_DATA_TYPE_STR(type, size) type##x##size
+#define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size)
+
+#define CONVERT_STR3(x, type, rtype) (convert_##rtype((x)))
+#define CONVERT_STR2(x, type, rtype) CONVERT_STR3(x, type, rtype)
+#define CONVERT_STR(x, type) CONVERT_STR2(x, type, type##_TYPE)
+#define CONVERT(x, type) CONVERT_STR(x, type)
+
+#define CONVERT_SAT_STR3(x, type, rtype) (convert_##rtype##_sat((x)))
+#define CONVERT_SAT_STR2(x, type, rtype) CONVERT_SAT_STR3(x, type, rtype)
+#define CONVERT_SAT_STR(x, type) CONVERT_SAT_STR2(x, type, type##_TYPE)
+#define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type)
+
+/* Computes max of fixed point types.
+ *
+ * @param[in] type is the actual data type.
+ *
+ * @return The result of the fixed point vector maximum.
+ */
+#define MAXQ_IMPL(type)                          \
+    inline type max_##type(type VopA, type VopB) \
+    {                                            \
+        return max(VopA, VopB);                  \
+    }
+
+MAXQ_IMPL(qs8x1)
+MAXQ_IMPL(qs8x2)
+MAXQ_IMPL(qs8x4)
+MAXQ_IMPL(qs8x8)
+MAXQ_IMPL(qs8x16)
+
+#define MAX_OP_EXPAND_STR(a, b, type, size) max_##type##x##size((a), (b))
+#define MAX_OP_EXPAND(a, b, type, size) MAX_OP_EXPAND_STR(a, b, type, size)
+
+/* Computes saturated addition of fixed point types.
+ *
+ * @param[in] type is the actual data type.
+ *
+ * @return The result of the fixed point vector addition. The result is saturated in case of overflow
+ */
+#define ADDQ_SAT_IMPL(type)                          \
+    inline type add_sat_##type(type VopA, type VopB) \
+    {                                                \
+        return add_sat(VopA, VopB);                  \
+    }
+
+ADDQ_SAT_IMPL(qs8x1)
+ADDQ_SAT_IMPL(qs8x2)
+ADDQ_SAT_IMPL(qs8x4)
+ADDQ_SAT_IMPL(qs8x8)
+ADDQ_SAT_IMPL(qs8x16)
+
+#define ADD_SAT_OP_EXPAND_STR(a, b, type, size) add_sat_##type##x##size((a), (b))
+#define ADD_SAT_OP_EXPAND(a, b, type, size) ADD_SAT_OP_EXPAND_STR(a, b, type, size)
+
+/* Computes saturated subtraction of fixed point types.
+ *
+ * @param[in] type is the actual data type.
+ *
+ * @return The result of the fixed point vector subtraction. The result is saturated in case of overflow
+ */
+#define SUBQ_SAT_IMPL(type)                          \
+    inline type sub_sat_##type(type VopA, type VopB) \
+    {                                                \
+        return sub_sat(VopA, VopB);                  \
+    }
+
+SUBQ_SAT_IMPL(qs8x1)
+SUBQ_SAT_IMPL(qs8x2)
+SUBQ_SAT_IMPL(qs8x4)
+SUBQ_SAT_IMPL(qs8x8)
+SUBQ_SAT_IMPL(qs8x16)
+
+#define SUB_SAT_OP_EXPAND_STR(a, b, type, size) sub_sat_##type##x##size((a), (b))
+#define SUB_SAT_OP_EXPAND(a, b, type, size) SUB_SAT_OP_EXPAND_STR(a, b, type, size)
+
+/* Computes saturated multiplication of two fixed point vectors.
+ *
+ * @param[in] type  is the actual data type.
+ * @param[in] itype is the intermediate data type.
+ *
+ * @return The result of the fixed point vector multiplication. The result is saturated in case of overflow
+ */
+#define MULQ_SAT_IMPL(type, itype)                                                            \
+    inline type mul_sat_##type(type VopA, type VopB, int fixed_point_position)                \
+    {                                                                                         \
+        itype round_val = (itype)(1 << (fixed_point_position - 1));                           \
+        itype res       = mad_sat(CONVERT((VopA), itype), CONVERT((VopB), itype), round_val); \
+        return CONVERT_SAT((res >> (itype)fixed_point_position), type);                       \
+    }
+
+MULQ_SAT_IMPL(qs8x16, qs16x16)
+
+#define MUL_SAT_OP_EXPAND_STR(a, b, type, size, position) mul_sat_##type##x##size((a), (b), (position))
+#define MUL_SAT_OP_EXPAND(a, b, type, size, position) MUL_SAT_OP_EXPAND_STR(a, b, type, size, position)
+
+/** Saturate division of two fixed point vectors
+  *
+  * @param[in] stype is the actual scalar data type.
+  * @param[in] type  is the actual data type.
+  * @param[in] itype is the intermediate data type.
+  *
+  * @return The result of the fixed point division. The result is saturated in case of overflow
+  */
+#define DIVQ_SAT_IMPL(stype, type, itype)                                                                                                                \
+    inline type div_sat_##type(type VopA, type VopB, int fixed_point_position)                                                                           \
+    {                                                                                                                                                    \
+        itype conv_a      = CONVERT((VopA), itype);                                                                                                      \
+        itype denominator = CONVERT((VopB), itype);                                                                                                      \
+        itype numerator   = conv_a << (itype)(fixed_point_position);                                                                                     \
+        itype res         = select(numerator / denominator, select((itype)stype##_MAX, (itype)stype##_MIN, conv_a < (itype)0), denominator == (itype)0); \
+        return CONVERT_SAT((res), type);                                                                                                                 \
+    }
+
+DIVQ_SAT_IMPL(qs8, qs8x16, qs16x16)
+
+#define DIV_SAT_OP_EXPAND_STR(a, b, type, size, position) div_sat_##type##x##size((a), (b), (position))
+#define DIV_SAT_OP_EXPAND(a, b, type, size, position) DIV_SAT_OP_EXPAND_STR(a, b, type, size, position)
+
+/** Saturated exponential of an 8 bit fixed point vector (16 elements)
+  *
+  * @param[in] a                    8 bit fixed point input vector
+  * @param[in] fixed_point_position Fixed point position that expresses the number of bits for the fractional part of the number
+  *
+  * @return The result of the 8 bit fixed point exponential. The result is saturated in case of overflow
+  */
+qs8x16 inline exp_qs8x16(qs8x16 a, int fixed_point_position)
+{
+    // Constants (literal constants are calculated by converting the respective float to the fixed point with the highest supported fixed point position)
+    char16 const_one = (char16)(1 << (fixed_point_position));
+    char16 ln2       = (char16)(((0x58 >> (6 - fixed_point_position)) + 1) >> 1);                 // 0.693147
+    char16 inv_ln2   = ((char16)(((0x38 >> (6 - (fixed_point_position))) + 1) >> 1)) | const_one; // 1.442695
+    char16 A         = (char16)(((0x7F >> (6 - (fixed_point_position))) + 1) >> 1);               // 0.9978546
+    char16 B         = (char16)(((0x3F >> (6 - (fixed_point_position))) + 1) >> 1);               // 0.4994721
+    char16 C         = (char16)(((0x16 >> (6 - (fixed_point_position))) + 1) >> 1);               // 0.1763723
+    char16 D         = (char16)(((0x05 >> (6 - (fixed_point_position))) + 1) >> 1);               // 0.0435108
+
+    // Perform range reduction [-log(2),log(2)]
+    char16 m = mul_sat_qs8x16(a, inv_ln2, fixed_point_position);
+
+    // Get the integer part of m
+    char16 dec_m = m >> (char16)fixed_point_position;
+
+    char16 alpha = mul_sat_qs8x16(dec_m << (char16)fixed_point_position, ln2, fixed_point_position);
+    alpha        = convert_char16(abs_diff(a, alpha));
+
+    // Polynomial expansion
+    char16 sum = add_sat_qs8x16(mul_sat_qs8x16(alpha, D, fixed_point_position), C);
+    sum        = add_sat_qs8x16(mul_sat_qs8x16(alpha, sum, fixed_point_position), B);
+    sum        = add_sat_qs8x16(mul_sat_qs8x16(alpha, sum, fixed_point_position), A);
+    sum        = add_sat_qs8x16(mul_sat_qs8x16(alpha, sum, fixed_point_position), const_one);
+
+    // Reconstruct and saturate result
+    return select(select(sum << dec_m, sum >> -dec_m, dec_m < (char16)0), (char16)0x7F, clz(sum) <= dec_m);
+}
+
+#define EXP_OP_EXPAND_STR(a, type, size, position) exp_##type##x##size((a), (position))
+#define EXP_OP_EXPAND(a, type, size, position) EXP_OP_EXPAND_STR(a, type, size, position)
+
+#endif // ARM_COMPUTE_FIXED_POINT_H
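
A QS8 value stores a real number x as round(x * 2^fixed_point_position) in a signed 8 bit integer, so every operation above widens to 16 bit, rounds and saturates explicitly. A small host-side sketch of the rounding and saturation scheme used by MULQ_SAT_IMPL, assuming a Q3.4 layout (fixed_point_position = 4); it is illustrative only and not part of the patch:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    namespace
    {
    constexpr int fixed_point_position = 4; // 4 fractional bits -> resolution 1/16

    int8_t sat_qs8(int value)
    {
        return static_cast<int8_t>(std::min(std::max(value, -128), 127));
    }

    // Same scheme as mul_sat_qs8x16: widen, add the rounding term, shift back
    // by the fixed point position, then saturate to 8 bit.
    int8_t mul_sat_qs8(int8_t a, int8_t b)
    {
        const int round_val = 1 << (fixed_point_position - 1);
        const int res       = a * b + round_val;
        return sat_qs8(res >> fixed_point_position);
    }

    float qs8_to_float(int8_t v)
    {
        return static_cast<float>(v) / (1 << fixed_point_position);
    }

    int8_t float_to_qs8(float v)
    {
        return sat_qs8(static_cast<int>(v * (1 << fixed_point_position) + (v >= 0 ? 0.5f : -0.5f)));
    }
    } // namespace

    int main()
    {
        const int8_t a = float_to_qs8(1.5f);  // 24 in Q3.4
        const int8_t b = float_to_qs8(2.25f); // 36 in Q3.4
        std::printf("1.5 * 2.25 = %f\n", qs8_to_float(mul_sat_qs8(a, b)));     // 3.375
        std::printf("saturated  = %f\n", qs8_to_float(mul_sat_qs8(127, 127))); // clamps to 7.9375
        return 0;
    }

exp_qs8x16 builds on the same primitives: it reduces its argument into [-ln(2), ln(2)] and evaluates a degree-4 polynomial whose coefficients are pre-shifted to the requested fixed point position.
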
diff --git a/src/core/CL/cl_kernels/helpers.h b/src/core/CL/cl_kernels/helpers.h
index 6db8ed5..cf3cb78 100644
--- a/src/core/CL/cl_kernels/helpers.h
+++ b/src/core/CL/cl_kernels/helpers.h
@@ -26,6 +26,8 @@
 
 #pragma OPENCL EXTENSION cl_khr_fp16 : enable
 
+#define EXPAND(x) x
+
 #define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val)
 
 #define VEC_DATA_TYPE_STR(type, size) type##size
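
EXPAND(x) is an identity macro that forces one extra round of macro expansion on its argument; preprocessor arguments that sit next to # or ## are not expanded first, which is why helpers.h and fixed_point.h pair every token-pasting *_STR macro with an expanding wrapper, and presumably why softmax_layer.cl defines SELECT_DATA_TYPE as EXPAND(DATA_TYPE). A standalone sketch of the behaviour (stringizing is used here so the effect is printable; the kernels paste type tokens instead):

    #include <cstdio>

    #define DATA_TYPE qs8 // normally supplied on the build line as -DDATA_TYPE=qs8

    #define PASTE_STR(type, size) #type "x" #size   // stringizes/pastes without expanding its arguments
    #define PASTE(type, size) PASTE_STR(type, size) // expands its arguments first, then delegates

    int main()
    {
        std::printf("%s\n", PASTE_STR(DATA_TYPE, 16)); // prints "DATA_TYPEx16"
        std::printf("%s\n", PASTE(DATA_TYPE, 16));     // prints "qs8x16"
        return 0;
    }
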
diff --git a/src/core/CL/cl_kernels/softmax_layer.cl b/src/core/CL/cl_kernels/softmax_layer.cl
index 632b4a5..a29aea4 100644
--- a/src/core/CL/cl_kernels/softmax_layer.cl
+++ b/src/core/CL/cl_kernels/softmax_layer.cl
@@ -23,14 +23,36 @@
  */
 #include "helpers.h"
 
-#if defined USE_F16
-#define MINVAL HALF_MIN
-#define SELECT_DATA_TYPE short
-#define DATA_TYPE half
+#if defined(FIXED_POINT_POSITION)
+
+#include "fixed_point.h"
+#define MAX_OP(x, y, type, size) MAX_OP_EXPAND(x, y, type, size)
+#define ADD_OP(x, y, type, size) ADD_SAT_OP_EXPAND((x), (y), type, size)
+#define SUB_OP(x, y, type, size) SUB_SAT_OP_EXPAND((x), (y), type, size)
+#define DIV_OP(x, y, type, size) DIV_SAT_OP_EXPAND((x), (y), type, size, FIXED_POINT_POSITION)
+#define EXP_OP(x, type, size) EXP_OP_EXPAND((x), type, size, FIXED_POINT_POSITION)
+
+#define MIN_VAL_EXPAND(type) type##_MIN
+#define MIN_VAL(type) MIN_VAL_EXPAND(type)
+#define MINVAL MIN_VAL(DATA_TYPE)
+#define SELECT_DATA_TYPE EXPAND(DATA_TYPE)
+
 #else
-#define MINVAL FLT_MIN
+
+#define MAX_OP(x, y, type, size) max((x), (y))
+#define ADD_OP(x, y, type, size) ((x) + (y))
+#define SUB_OP(x, y, type, size) ((x) - (y))
+#define DIV_OP(x, y, type, size) ((x) / (y))
+#define EXP_OP(x, type, size) exp((x))
+
+#if defined USE_F16
+#define MINVAL -HALF_MAX
+#define SELECT_DATA_TYPE short
+#else
+#define MINVAL -FLT_MAX
 #define SELECT_DATA_TYPE int
-#define DATA_TYPE float
+#endif
+
 #endif
 
 __constant VEC_DATA_TYPE(DATA_TYPE, 16) type_min = (VEC_DATA_TYPE(DATA_TYPE, 16))(MINVAL);
@@ -39,16 +61,16 @@
 /** Identifies the maximum value across the 1st dimension.
  *
  * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
- * @note In case F16 is used -DUSE_HALF must be passed otherwise the kernel will default to used F32.
+ * @note Fixed point position must be given as a preprocessor argument using -DFIXED_POINT_POSITION=pos. e.g. -DFIXED_POINT_POSITION=4
  * @note In case the input is not multiple of 16 -DNON_MULTIPLE_OF_16 must be passed.
  *
- * @param[in]  src_ptr                           Pointer to the source tensor slice. Supported data types: F16, F32
+ * @param[in]  src_ptr                           Pointer to the source tensor slice. Supported data types: QS8/F16/F32
  * @param[in]  src_stride_x                      Stride of the source tensor in X dimension (in bytes)
  * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
  * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
  * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[out] dst_ptr                           Pointer to the destination tensor slice. Supported data types: F16, F32
+ * @param[out] dst_ptr                           Pointer to the destination tensor slice. Supported data types: same as @p src_ptr
  * @param[in]  dst_stride_x                      Stride of the destination tensor in X dimension (in bytes)
  * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
@@ -74,7 +96,7 @@
     {
         VEC_DATA_TYPE(DATA_TYPE, 16)
         data    = vload16(0, (__global DATA_TYPE *)offset(&src, i << 4, 0));
-        max_val = max(data, max_val);
+        max_val = MAX_OP(data, max_val, DATA_TYPE, 16);
     }
 
 #if defined NON_MULTIPLE_OF_16
@@ -83,14 +105,14 @@
     data = vload16(0, (__global DATA_TYPE *)offset(&src, width4 << 4, 0));
     VEC_DATA_TYPE(SELECT_DATA_TYPE, 16)
     widx    = CONVERT(((uint16)(width4 << 4) + idx16) < width, VEC_DATA_TYPE(SELECT_DATA_TYPE, 16));
-    max_val = max(max_val, select(type_min, data, widx));
+    max_val = MAX_OP(max_val, select(type_min, data, widx), DATA_TYPE, 16);
 #endif
 
     // Perform max reduction
-    max_val.s01234567 = max(max_val.s01234567, max_val.s89ABCDEF);
-    max_val.s0123     = max(max_val.s0123, max_val.s4567);
-    max_val.s01       = max(max_val.s01, max_val.s23);
-    max_val.s0        = max(max_val.s0, max_val.s1);
+    max_val.s01234567 = MAX_OP(max_val.s01234567, max_val.s89ABCDEF, DATA_TYPE, 8);
+    max_val.s0123     = MAX_OP(max_val.s0123, max_val.s4567, DATA_TYPE, 4);
+    max_val.s01       = MAX_OP(max_val.s01, max_val.s23, DATA_TYPE, 2);
+    max_val.s0        = MAX_OP(max_val.s0, max_val.s1, DATA_TYPE, 1);
 
     // Store result
     *((__global DATA_TYPE *)dst.ptr) = max_val.s0;
@@ -100,28 +122,28 @@
  * then gets the exponent of each element as sums all elements across each row.
  *
  * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
- * @note In case F16 is used -DUSE_HALF must be passed otherwise the kernel will default to used F32.
+ * @note Fixed point position must be given as a preprocessor argument using -DFIXED_POINT_POSITION=pos. e.g. -DFIXED_POINT_POSITION=4
  * @note In case the input is not multiple of 16 -DNON_MULTIPLE_OF_16 must be passed.
  *
- * @param[in]  src_ptr                           Pointer to the source tensor slice. Supported data types: F16, F32
+ * @param[in]  src_ptr                           Pointer to the source tensor slice. Supported data types: QS8/F16/F32
  * @param[in]  src_stride_x                      Stride of the source tensor in X dimension (in bytes)
  * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
  * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
  * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in]  max_ptr                           Pointer to the max values tensor slice. Supported data types: F16, F32
+ * @param[in]  max_ptr                           Pointer to the max values tensor slice. Supported data types: same as @p src_ptr
  * @param[in]  max_stride_x                      Stride of the max values tensor in X dimension (in bytes)
  * @param[in]  max_step_x                        max_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in]  max_stride_y                      Stride of the max values tensor in Y dimension (in bytes)
  * @param[in]  max_step_y                        max_stride_y * number of elements along Y processed per workitem(in bytes)
  * @param[in]  max_offset_first_element_in_bytes The offset of the first element in the max values tensor
- * @param[out] dst_ptr                           Pointer to the destination tensor slice. Supported data types: F16, F32
+ * @param[out] dst_ptr                           Pointer to the destination tensor slice. Supported data types: same as @p src_ptr
  * @param[in]  dst_stride_x                      Stride of the destination tensor in X dimension (in bytes)
  * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
  * @param[in]  dst_step_y                        dst_stride_y * number of elements along Y processed per workitem(in bytes)
  * @param[in]  dst_offset_first_element_in_bytes The offset of the first element in the destination tensor
- * @param[out] sum_ptr                           Pointer to the sum values tensor slice. Supported data types: F16, F32
+ * @param[out] sum_ptr                           Pointer to the sum values tensor slice. Supported data types: same as @p src_ptr
  * @param[in]  sum_stride_x                      Stride of the sum values tensor in X dimension (in bytes)
  * @param[in]  sum_step_x                        sum_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in]  sum_stride_y                      Stride of the sum values tensor in Y dimension (in bytes)
@@ -154,28 +176,30 @@
     {
         VEC_DATA_TYPE(DATA_TYPE, 16)
         data = vload16(0, (__global DATA_TYPE *)offset(&src, i << 4, 0));
-        data = exp(data - max_val);
+        data = SUB_OP(data, max_val, DATA_TYPE, 16);
+        data = EXP_OP(data, DATA_TYPE, 16);
         vstore16(data, 0, (__global DATA_TYPE *)offset(&dst, i << 4, 0));
-        sum1D += data;
+        sum1D = ADD_OP(sum1D, data, DATA_TYPE, 16);
     }
 
 #if defined NON_MULTIPLE_OF_16
     // Handle non multiple of 16
     VEC_DATA_TYPE(DATA_TYPE, 16)
     data = vload16(0, (__global DATA_TYPE *)offset(&src, width4 << 4, 0));
-    data = exp(data - max_val);
+    data = SUB_OP(data, max_val, DATA_TYPE, 16);
+    data = EXP_OP(data, DATA_TYPE, 16);
     VEC_DATA_TYPE(SELECT_DATA_TYPE, 16)
     widx = CONVERT(((uint16)(width4 << 4) + idx16) < width, VEC_DATA_TYPE(SELECT_DATA_TYPE, 16));
     data = select(0, data, widx);
     vstore16(data, 0, (__global DATA_TYPE *)offset(&dst, width4 << 4, 0));
-    sum1D += data;
+    sum1D = ADD_OP(sum1D, data, DATA_TYPE, 16);
 #endif
 
     // Perform min/max reduction
-    sum1D.s01234567 = sum1D.s01234567 + sum1D.s89ABCDEF;
-    sum1D.s0123     = sum1D.s0123 + sum1D.s4567;
-    sum1D.s01       = sum1D.s01 + sum1D.s23;
-    sum1D.s0        = sum1D.s0 + sum1D.s1;
+    sum1D.s01234567 = ADD_OP(sum1D.s01234567, sum1D.s89ABCDEF, DATA_TYPE, 8);
+    sum1D.s0123     = ADD_OP(sum1D.s0123, sum1D.s4567, DATA_TYPE, 4);
+    sum1D.s01       = ADD_OP(sum1D.s01, sum1D.s23, DATA_TYPE, 2);
+    sum1D.s0        = ADD_OP(sum1D.s0, sum1D.s1, DATA_TYPE, 1);
 
     // Calculate and store result
     *((__global DATA_TYPE *)sum.ptr) = sum1D.s0;
@@ -184,20 +208,21 @@
 /** Divides all the values of the input tensor by the sum calculated from softmax_layer_shift_exp_sum kernel.
  *
  * @note Datatype must be given as a preprocessor argument using -DDATA_TYPE=type. e.g. -DDATA_TYPE=short
+ * @note Fixed point position must be given as a preprocessor argument using -DFIXED_POINT_POSITION=pos. e.g. -DFIXED_POINT_POSITION=4
  *
- * @param[in]  src_ptr                           Pointer to the source tensor slice. Supported data types: F16, F32
+ * @param[in]  src_ptr                           Pointer to the source tensor slice. Supported data types: QS8/F16/F32
  * @param[in]  src_stride_x                      Stride of the source tensor in X dimension (in bytes)
  * @param[in]  src_step_x                        src_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in]  src_stride_y                      Stride of the source tensor in Y dimension (in bytes)
  * @param[in]  src_step_y                        src_stride_y * number of elements along Y processed per workitem(in bytes)
  * @param[in]  src_offset_first_element_in_bytes The offset of the first element in the source tensor
- * @param[in]  sum_ptr                           Pointer to the sum values tensor slice. Supported data types: F16, F32
+ * @param[in]  sum_ptr                           Pointer to the sum values tensor slice. Supported data types: same as @p src_ptr
  * @param[in]  sum_stride_x                      Stride of the sum values tensor in X dimension (in bytes)
  * @param[in]  sum_step_x                        sum_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in]  sum_stride_y                      Stride of the sum values tensor in Y dimension (in bytes)
  * @param[in]  sum_step_y                        sum_stride_y * number of elements along Y processed per workitem(in bytes)
  * @param[in]  sum_offset_first_element_in_bytes The offset of the first element in the sum values tensor
- * @param[out] dst_ptr                           Pointer to the destination tensor slice. Supported data types: F16, F32
+ * @param[out] dst_ptr                           Pointer to the destination tensor slice. Supported data types: same as @p src_ptr
  * @param[in]  dst_stride_x                      Stride of the destination tensor in X dimension (in bytes)
  * @param[in]  dst_step_x                        dst_stride_x * number of elements along X processed per workitem(in bytes)
  * @param[in]  dst_stride_y                      Stride of the destination tensor in Y dimension (in bytes)
@@ -217,5 +242,5 @@
     DATA_TYPE sum_val = *((__global DATA_TYPE *)offset(&sum, 0, get_global_id(1)));
     VEC_DATA_TYPE(DATA_TYPE, 16)
     data = vload16(0, (__global DATA_TYPE *)offset(&src, 0, 0));
-    vstore16(data / sum_val, 0, (__global DATA_TYPE *)offset(&dst, 0, 0));
+    vstore16(DIV_OP(data, sum_val, DATA_TYPE, 16), 0, (__global DATA_TYPE *)offset(&dst, 0, 0));
 }
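
The three kernels in this file implement the usual numerically stable softmax per row: reduce the maximum, exponentiate the max-shifted values while accumulating their sum, then normalise by that sum. Keeping the exponent arguments non-positive matters even more for QS8, whose representable range is tiny. A plain host-side sketch of the same three stages, in float for readability (illustrative only, not part of the patch):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    static void softmax_row(const std::vector<float> &src, std::vector<float> &dst)
    {
        // Stage 1: maximum across the row (softmax_layer_max).
        const float max_val = *std::max_element(src.begin(), src.end());

        // Stage 2: shift by the maximum, exponentiate and accumulate the sum
        // (softmax_layer_shift_exp_sum).
        float sum = 0.f;
        for(std::size_t i = 0; i < src.size(); ++i)
        {
            dst[i] = std::exp(src[i] - max_val);
            sum += dst[i];
        }

        // Stage 3: normalise by the sum (softmax_layer_norm).
        for(float &v : dst)
        {
            v /= sum;
        }
    }

    int main()
    {
        const std::vector<float> in{ 1.f, 2.f, 3.f, 4.f };
        std::vector<float>       out(in.size());
        softmax_row(in, out);
        for(float v : out)
        {
            std::printf("%f ", v); // 0.032059 0.087144 0.236883 0.643914
        }
        std::printf("\n");
        return 0;
    }

In the QS8 build each of these element-wise operations maps onto the saturating MAX_OP/SUB_OP/EXP_OP/ADD_OP/DIV_OP macros selected at the top of the file.
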
diff --git a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
index 0470d52..c488f90 100644
--- a/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
+++ b/src/core/CL/kernels/CLSoftmaxLayerKernel.cpp
@@ -41,9 +41,9 @@
 
 void CLLogits1DMaxKernel::configure(const ICLTensor *input, ICLTensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
 
     _input  = input;
     _output = output;
@@ -52,7 +52,12 @@
     const unsigned int num_elems_processed_per_iteration = ceil_to_multiple(input->info()->dimension(0), 16);
 
     // Set build options
-    std::set<std::string> build_opts{ "-DUSE_" + string_from_data_type(input->info()->data_type()) };
+    std::set<std::string> build_opts;
+    build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
+    if(is_data_type_fixed_point(input->info()->data_type()))
+    {
+        build_opts.emplace(("-DFIXED_POINT_POSITION=" + val_to_string(input->info()->fixed_point_position())));
+    }
 
     // Tell the kernel that the width is not a multiple of 16
     if((input->info()->dimension(0) % max_cl_vector_width) != 0)
@@ -88,11 +93,10 @@
 
 void CLLogits1DShiftExpSumKernel::configure(const ICLTensor *input, const ICLTensor *max, ICLTensor *output, ICLTensor *sum)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(max, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(sum, 1, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::F16, DataType::F32);
     ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, max, sum);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output, max, sum);
 
     _input  = input;
     _max    = max;
@@ -103,7 +107,12 @@
     const unsigned int num_elems_processed_per_iteration = ceil_to_multiple(input->info()->dimension(0), 16);
 
     // Set build options
-    std::set<std::string> build_opts{ "-DUSE_" + string_from_data_type(input->info()->data_type()) };
+    std::set<std::string> build_opts;
+    build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
+    if(is_data_type_fixed_point(input->info()->data_type()))
+    {
+        build_opts.emplace(("-DFIXED_POINT_POSITION=" + val_to_string(input->info()->fixed_point_position())));
+    }
 
     // Tell the kernel that the width is not a multiple of 16
     if((input->info()->dimension(0) % max_cl_vector_width) != 0)
@@ -161,10 +170,10 @@
 
 void CLLogits1DNormKernel::configure(const ICLTensor *input, const ICLTensor *sum, ICLTensor *output)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(sum, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, sum);
+    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::F16, DataType::F32);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, sum, output);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
+    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, sum, output);
 
     _input  = input;
     _sum    = sum;
@@ -172,7 +181,11 @@
 
     // Set build options
     std::set<std::string> build_opts;
-    build_opts.emplace(("-DUSE_" + string_from_data_type(input->info()->data_type())));
+    build_opts.emplace(("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type())));
+    if(is_data_type_fixed_point(input->info()->data_type()))
+    {
+        build_opts.emplace(("-DFIXED_POINT_POSITION=" + val_to_string(input->info()->fixed_point_position())));
+    }
 
     // Create kernel
     _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("softmax_layer_norm", build_opts));
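
With these changes the layer can run on QS8 tensors end to end. A minimal host-side usage sketch, assuming the public CLSoftmaxLayer function, the TensorInfo constructor that carries a fixed point position, and the CLScheduler bootstrap used in the library's examples (the shape and fixed point position below are illustrative only):

    #include "arm_compute/runtime/CL/CLFunctions.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // 128 logits per row, 32 rows, QS8 with 4 fractional bits (Q3.4).
        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::QS8, 4));
        dst.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::QS8, 4));

        CLSoftmaxLayer softmax;
        softmax.configure(&src, &dst);

        src.allocator()->allocate();
        dst.allocator()->allocate();

        // ... map src, fill it with QS8 data, unmap ...

        softmax.run();
        CLScheduler::get().sync();
        return 0;
    }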