COMPMID-970: Remove QS8/QS16 support

Removed fixed-point (QS8/QS16) related code and documentation references.

Change-Id: I487acf138dace3b0450e0d72ca7071eaec254566
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/137678
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/arm_compute/runtime/NEON/functions/NEActivationLayer.h b/arm_compute/runtime/NEON/functions/NEActivationLayer.h
index 59f5802..a65146d 100644
--- a/arm_compute/runtime/NEON/functions/NEActivationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEActivationLayer.h
@@ -44,7 +44,7 @@
      * @note If the output tensor is a nullptr or is equal to the input, the activation function will be performed in-place
      *
      * @param[in, out] input           Source tensor. In case of @p output tensor = nullptr, this tensor will store the result
-     *                                 of the activation function. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     *                                 of the activation function. Data types supported: QASYMM8/F16/F32.
      * @param[out]     output          Destination tensor. Data type supported: same as @p input
      * @param[in]      activation_info Activation layer parameters.
      */
@@ -52,7 +52,7 @@
     /** Static function to check if given info will lead to a valid configuration of @ref NEActivationLayer
      *
      * @param[in] input    Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
-     *                     of the activation function. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     *                     of the activation function. Data types supported: QASYMM8/F16/F32.
      * @param[in] output   Destination tensor info. Data type supported: same as @p input
      * @param[in] act_info Activation layer information.
      *
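
For reference, a minimal usage sketch of the updated NEActivationLayer interface (the shape and the ReLU choice are illustrative, not part of this change):

    #include "arm_compute/runtime/NEON/NEFunctions.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // In-place ReLU on an F32 tensor: with output == nullptr the result is
        // written back into the input, as documented above.
        Tensor src;
        src.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U), 1, DataType::F32));

        NEActivationLayer act;
        act.configure(&src, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

        src.allocator()->allocate(); // allocate after configure, as usual for NEON functions
        act.run();
        return 0;
    }
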
diff --git a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
index c72d0b6..c296463 100644
--- a/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
+++ b/arm_compute/runtime/NEON/functions/NEArithmeticAddition.h
@@ -37,17 +37,17 @@
 public:
     /** Initialise the kernel's inputs, output and conversion policy.
      *
-     * @param[in]  input1 First tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
-     * @param[in]  input2 Second tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
-     * @param[out] output Output tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
+     * @param[in]  input1 First tensor input. Data types supported: U8/S16/F16/F32
+     * @param[in]  input2 Second tensor input. Data types supported: U8/S16/F16/F32
+     * @param[out] output Output tensor. Data types supported: U8/S16/F16/F32
      * @param[in]  policy Policy to use to handle overflow.
      */
     void configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy);
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticAddition
      *
-     * @param[in] input1 First tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
-     * @param[in] input2 Second tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
-     * @param[in] output Output tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
+     * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32
+     * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32
+     * @param[in] output Output tensor. Data types supported: U8/S16/F16/F32
      * @param[in] policy Policy to use to handle overflow.
      *
      * @return a status
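
The validate()/configure() pair can be exercised as below (a sketch with illustrative S16 shapes, using the same headers as the NEActivationLayer example above):

    // Check the configuration up front; Status reports OK or an error description.
    TensorInfo info(TensorShape(32U, 32U), 1, DataType::S16);
    Status s = NEArithmeticAddition::validate(&info, &info, &info, ConvertPolicy::SATURATE);
    if(s.error_code() == ErrorCode::OK)
    {
        Tensor a, b, sum;
        a.allocator()->init(info);
        b.allocator()->init(info);
        sum.allocator()->init(info);

        NEArithmeticAddition add;
        add.configure(&a, &b, &sum, ConvertPolicy::SATURATE); // SATURATE clamps instead of wrapping
    }
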
diff --git a/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h b/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h
index 751ed1a..9b460c1 100644
--- a/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h
+++ b/arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -37,17 +37,17 @@
 public:
     /** Initialise the kernel's inputs, output and conversion policy.
      *
-     * @param[in]  input1 First tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
-     * @param[in]  input2 Second tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
-     * @param[out] output Output tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
+     * @param[in]  input1 First tensor input. Data types supported: U8/S16/F16/F32
+     * @param[in]  input2 Second tensor input. Data types supported: U8/S16/F16/F32
+     * @param[out] output Output tensor. Data types supported: U8/S16/F16/F32
      * @param[in]  policy Policy to use to handle overflow.
      */
     void configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy);
     /** Static function to check if given info will lead to a valid configuration of @ref NEArithmeticSubtraction
      *
-     * @param[in] input1 First tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
-     * @param[in] input2 Second tensor input. Data types supported: U8/QS8/QS16/S16/F16/F32
-     * @param[in] output Output tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
+     * @param[in] input1 First tensor input. Data types supported: U8/S16/F16/F32
+     * @param[in] input2 Second tensor input. Data types supported: U8/S16/F16/F32
+     * @param[in] output Output tensor. Data types supported: U8/S16/F16/F32
      * @param[in] policy Policy to use to handle overflow.
      *
      * @return a status
diff --git a/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h b/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
index feb2087..77f0612 100644
--- a/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
@@ -50,7 +50,7 @@
      *
      * @param[in, out] input    Source tensor. In case of @p output tensor = nullptr, this tensor will store the result.
      *                          3 lower dimensions represent a single input with dimensions [width, height, FM].
-     *                          The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+     *                          The rest are optional and used for representing batches. Data types supported: F16/F32.
      * @param[out]     output   Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      * @param[in]      mean     Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in]      var      Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
@@ -65,7 +65,7 @@
      *
      * @param[in] input    Source tensor info. In case of @p output tensor = nullptr, this tensor will store the result.
      *                     3 lower dimensions represent a single input with dimensions [width, height, FM].
-     *                     The rest are optional and used for representing batches. Data types supported: QS8/QS16/F16/F32.
+     *                     The rest are optional and used for representing batches. Data types supported: F16/F32.
      * @param[in] output   Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
      * @param[in] mean     Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in] var      Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
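
A configuration sketch for the F32 path (shapes illustrative; the epsilon value is an assumption, not mandated by the API):

    // mean/var/beta/gamma are 1-D tensors with one entry per feature map (FM).
    Tensor input, output, mean, var, beta, gamma;
    input.allocator()->init(TensorInfo(TensorShape(28U, 28U, 64U), 1, DataType::F32));
    output.allocator()->init(TensorInfo(TensorShape(28U, 28U, 64U), 1, DataType::F32));

    TensorInfo params_info(TensorShape(64U), 1, DataType::F32);
    mean.allocator()->init(params_info);
    var.allocator()->init(params_info);
    beta.allocator()->init(params_info);
    gamma.allocator()->init(params_info);

    NEBatchNormalizationLayer bn;
    bn.configure(&input, &output, &mean, &var, &beta, &gamma, 0.001f /* epsilon */);
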
diff --git a/arm_compute/runtime/NEON/functions/NECol2Im.h b/arm_compute/runtime/NEON/functions/NECol2Im.h
index 9b05bd4..42876a8 100644
--- a/arm_compute/runtime/NEON/functions/NECol2Im.h
+++ b/arm_compute/runtime/NEON/functions/NECol2Im.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -39,7 +39,7 @@
 public:
     /** Configure the col2im NEON kernel
      *
-     * @param[in]  input          The input tensor to convert. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+     * @param[in]  input          The input tensor to convert. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
      * @param[out] output         The output tensor. 3 lower dimensions represent a single output [width, height, OFM],
      *                            while the rest represent batch of outputs. Data types supported: Same as @p input
      * @param[in]  convolved_dims Output convolved dimensions.
@@ -47,7 +47,7 @@
     void configure(const ITensor *input, ITensor *output, const Size2D &convolved_dims);
     /** Static function to check if given info will lead to a valid configuration of @ref NECol2Im
      *
-     * @param[in] input          The input tensor to convert. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+     * @param[in] input          The input tensor to convert. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
      * @param[in] output         The output tensor. 3 lower dimensions represent a single output [width, height, OFM],
      *                           while the rest represent batch of outputs. Data types supported: Same as @p input
      * @param[in] convolved_dims Output convolved dimensions.
diff --git a/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h b/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h
index bdb157f..3ec0390 100644
--- a/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h
+++ b/arm_compute/runtime/NEON/functions/NEConvertFullyConnectedWeights.h
@@ -40,7 +40,7 @@
     NEConvertFullyConnectedWeights();
     /** Initialize the function.
      *
-     * @param[in]  input                Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/QS32/F16/F32.
+     * @param[in]  input                Source weights tensor to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
      * @param[out] output               The converted weights tensor. Shape and Data Type: Same as @p input.
      * @param[in]  original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
      * @param[in]  data_layout          The data layout the weights have been trained in.
@@ -48,7 +48,7 @@
     void configure(const ITensor *input, ITensor *output, const TensorShape &original_input_shape, DataLayout data_layout);
     /** Static function to check if given info will lead to a valid configuration of @ref NEConvertFullyConnectedWeights
      *
-     * @param[in] input                Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/QS32/F16/F32.
+     * @param[in] input                Source weights tensor info to convert. Must be 2 dimensional. Data types supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32.
      * @param[in] output               The converted weights tensor info. Shape and Data Type: Same as @p input.
      * @param[in] original_input_shape Shape of the original input tensor (the one entering fully connected layer). Must be in NCHW format.
      * @param[in] data_layout          The data layout the weights have been trained in.
diff --git a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
index e143814..c4226cb 100644
--- a/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEConvolutionLayer.h
@@ -52,7 +52,7 @@
      *
      * @param[in]  input            Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
      *                              while every optional dimension from 4 and above represent a batch of inputs.
-     *                              Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     *                              Data types supported: QASYMM8/F16/F32.
      * @param[in]  weights          Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
      * @param[in]  biases           Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
      *                              Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -72,7 +72,7 @@
      *
      * @param[in] input            Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
      *                             while every optional dimension from 4 and above represent a batch of inputs.
-     *                             Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     *                             Data types supported: QASYMM8/F16/F32.
     * @param[in] weights          Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
      * @param[in] biases           Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
      *                             Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -94,7 +94,7 @@
      *
      * @param[in] input            Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
      *                             while every optional dimension from 4 and above represent a batch of inputs.
-     *                             Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     *                             Data types supported: QASYMM8/F16/F32.
     * @param[in] weights          Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
      * @param[in] output           Destination tensor. 3 lower dimensions represent a single output [width, height, OFM], while the rest represent batch of outputs.
      *                             Data types supported: Same as @p input.
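
A minimal F32 configuration sketch (all shapes illustrative):

    // 3x3 convolution, stride 1, 1-pixel padding. Weights are laid out as
    // [kernel_x, kernel_y, IFM, OFM]; biases hold one value per OFM.
    Tensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 32U), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(32U, 32U, 32U), 1, DataType::F32));

    NEConvolutionLayer conv;
    conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 1, 1));
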
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
index 5b63b70..eefb5fa 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConcatenateLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -49,7 +49,7 @@
     NEDepthConcatenateLayer();
     /** Initialise the kernel's inputs vector and output.
      *
-     * @param[in,out] inputs_vector The vectors containing all the tensors to concatenate. Data types supported:  QS8/QS16/F16/F32.
+     * @param[in,out] inputs_vector The vector containing all the tensors to concatenate. Data types supported: F16/F32.
      * @param[out]    output        Output tensor. Data types supported: Same as @p inputs_vector.
      */
     void configure(std::vector<ITensor *> inputs_vector, ITensor *output);
diff --git a/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h b/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h
index b235e87..eedadc2 100644
--- a/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDepthConvertLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -46,22 +46,14 @@
     /** Initialize the function's source, destination
      *
      * Valid conversions Input -> Output :
-     *    QS8 -> QS8, F32
      *    U8 -> U16, S16, S32
      *    U16 -> U8, U32
      *    S16 -> U8, S32
-     *    QS16 -> QS16, F32
-     *    F32 -> QS8, QS16
      *
-     * @warning In case of in-place fixed point position conversion make sure that configure has been called
-     *          before the updated tensor is used in other functions, as the TensorInfo of the tensor will be
-     *          altered. In-place is only supported for QS8 -> QS8, QS16 -> QS16.
-     *
-     * @param[in, out] input  The input tensor to convert (Written in case of in-place computation). Data types supported: U8/QS8/U16/S16/F32.
-     * @param[out]     output The output tensor. Can be null in case of in-place computation. Data types supported: U8/QS8/U16/S16/U32/S32/F32.
+     * @param[in, out] input  The input tensor to convert (Written in case of in-place computation). Data types supported: U8/U16/S16/F32.
+     * @param[out]     output The output tensor. Can be null in case of in-place computation. Data types supported: U8/U16/S16/U32/S32/F32.
      * @param[in]      policy Conversion policy.
      * @param[in]      shift  (Optional) Value for down/up conversions. Must be 0 <= shift < 8.
-     *                        In case of fixed point position conversion, it specifies the new fixed point position, if operation is in-place.
      */
     void configure(ITensor *input, ITensor *output, ConvertPolicy policy, uint32_t shift = 0);
 };
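
One of the remaining valid conversions, sketched (shapes illustrative):

    // Widening U8 -> S16 conversion with a 1-bit up-shift; SATURATE clamps
    // any value that would overflow the destination type.
    Tensor u8_src, s16_dst;
    u8_src.allocator()->init(TensorInfo(TensorShape(64U, 64U), 1, DataType::U8));
    s16_dst.allocator()->init(TensorInfo(TensorShape(64U, 64U), 1, DataType::S16));

    NEDepthConvertLayer convert;
    convert.configure(&u8_src, &s16_dst, ConvertPolicy::SATURATE, 1 /* shift */);
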
diff --git a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
index ae384ff..a4a55d1 100644
--- a/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEDirectConvolutionLayer.h
@@ -54,11 +54,11 @@
     /** Set the input, weights, biases and output tensors.
      *
      * @note: DirectConvolution only works in the following configurations:
-     *    1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/QS16/F16/F32
-     *    3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/F16/F32
+     *    1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F16/F32
+     *    3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F16/F32
      *    5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F32
      *
-     * @param[in, out] input     Input tensor. Data types supported: QS8/QS16/F16/F32.
+     * @param[in, out] input     Input tensor. Data types supported: F16/F32.
      * @param[in]      weights   Set of kernels to convolve the input volume.
      *                           Supported sizes: 1x1, 3x3 and 5x5.
      *                           The 3rd dimension must be the same as the input's volume 3rd dimension.
@@ -73,11 +73,11 @@
     /** Static function to check if given info will lead to a valid configuration of @ref NEDirectConvolutionLayer
      *
      * @note: DirectConvolution only works in the following configurations:
-     *    1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/QS16/F16/F32
-     *    3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = QS8/F16/F32
+     *    1x1 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F16/F32
+     *    3x3 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F16/F32
      *    5x5 convolution with stride_x = 1/2/3, stride_y = 1/2/3 data type = F32
      *
-     * @param[in] input     Input tensor. Data types supported: QS8/QS16/F16/F32.
+     * @param[in] input     Input tensor. Data types supported: F16/F32.
      * @param[in] weights   Set of kernels to convolve the input volume.
      *                      Supported sizes: 1x1, 3x3 and 5x5.
      *                      The 3rd dimension must be the same as the input's volume 3rd dimension.
@@ -104,7 +104,6 @@
     NEActivationLayer                         _activationlayer_function;
     Tensor                                    _accumulator;
     bool                                      _has_bias;
-    bool                                      _is_fixed_point;
     bool                                      _is_activationlayer_enabled;
     unsigned int                              _dim_split;
 };
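
A sketch of one configuration the note above allows (F32, 3x3 kernel, stride 1; shapes illustrative):

    Tensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 16U, 32U), 1, DataType::F32));
    bias.allocator()->init(TensorInfo(TensorShape(32U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(30U, 30U, 32U), 1, DataType::F32));

    NEDirectConvolutionLayer direct_conv;
    direct_conv.configure(&src, &weights, &bias, &dst, PadStrideInfo(1, 1, 0, 0)); // no padding: 32 - 3 + 1 = 30
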
diff --git a/arm_compute/runtime/NEON/functions/NEFillBorder.h b/arm_compute/runtime/NEON/functions/NEFillBorder.h
index b6b7e77..27a9eea 100644
--- a/arm_compute/runtime/NEON/functions/NEFillBorder.h
+++ b/arm_compute/runtime/NEON/functions/NEFillBorder.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017 ARM Limited.
+ * Copyright (c) 2016-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -41,7 +41,7 @@
      *
      * @note This function fills the borders within the XY-planes.
      *
-     * @param[in, out] input                 Source tensor. Data type supported: U8/QS8/S16/S32/F32
+     * @param[in, out] input                 Source tensor. Data type supported: U8/S16/S32/F32
      * @param[in]      border_width          Width of the tensor border in pixels.
      * @param[in]      border_mode           Strategy to use for borders.
      * @param[in]      constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT.
diff --git a/arm_compute/runtime/NEON/functions/NEFlattenLayer.h b/arm_compute/runtime/NEON/functions/NEFlattenLayer.h
index e9c8e27..2c259fa 100644
--- a/arm_compute/runtime/NEON/functions/NEFlattenLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEFlattenLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -41,7 +41,7 @@
 public:
     /** Initialise the kernel's input and output.
      *
-     * @param[in]  input  First input tensor to flatten with at least 3 dimensions. The dimensions over the third will be interpreted as batches. Data types supported: QS8/QS16/F16/F32
+     * @param[in]  input  First input tensor to flatten with at least 3 dimensions. The dimensions over the third will be interpreted as batches. Data types supported: F16/F32
      * @param[out] output Output tensor with shape [w*h*d, input_batches] where:
      *             w = width input tensor, h = height input tensor and d = depth input tensor. Data type supported: same as @p input
      */
diff --git a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
index 42c9e2d..d4166b3 100644
--- a/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h
@@ -51,7 +51,7 @@
     NEFullyConnectedLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
     /** Set the input and output tensors.
      *
-     * @param[in]  input               Weights tensor. The weights must be 2 dimensional. Data types supported: QS8/QS16/F32.
+     * @param[in]  input               Weights tensor. The weights must be 2 dimensional. Data types supported: F32.
      * @param[out] output              Destination tensor. Data type supported: Same as @p input.
      * @param[in]  transpose_weights   True if the weights must be transposed. Data types supported: Same as @p weights.
      * @param[in]  is_batched_fc_layer True if it is a batched fully connected layer
@@ -59,7 +59,7 @@
     void configure(const ITensor *input, ITensor *output, bool transpose_weights, bool is_batched_fc_layer);
     /** Static function to check if given info will lead to a valid configuration of @ref CLFullyConnectedLayerReshapeWeights
      *
-     * @param[in] input               Weights tensor info. The weights must be 2 dimensional. Data types supported: QS8/QS16/F32.
+     * @param[in] input               Weights tensor info. The weights must be 2 dimensional. Data types supported: F32.
      * @param[in] output              Destination tensor info. Data type supported: Same as @p input.
      * @param[in] transpose_weights   True if the weights must be transposed. Data types supported: Same as @p weights.
      * @param[in] is_batched_fc_layer True if it is a batched fully connected layer
@@ -104,7 +104,7 @@
     NEFullyConnectedLayer &operator=(NEFullyConnectedLayer &&) = default;
     /** Set the input and output tensors.
      *
-     * @param[in]  input                Source tensor. Data type supported: QS8/QS16/F16/F32.
+     * @param[in]  input                Source tensor. Data type supported: F16/F32.
      * @param[in]  weights              Weights tensor. The weights must be 2 dimensional. Data type supported: Same as @p input.
     * @param[in]  biases               Bias tensor. Can be nullptr. Data type supported: Same as @p input.
      * @param[out] output               Destination tensor. Data type supported: Same as @p input.
@@ -114,7 +114,7 @@
     void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, bool transpose_weights = true, bool are_weights_reshaped = false);
     /** Static function to check if given info will lead to a valid configuration of @ref CLFullyConnectedLayer
      *
-     * @param[in] input                Source tensor info. Data type supported: QS8/QS16/F16/F32.
+     * @param[in] input                Source tensor info. Data type supported: F16/F32.
      * @param[in] weights              Weights tensor info. The weights must be 2 dimensional. Data type supported: Same as @p input
     * @param[in] biases               Bias tensor info. It can be nullptr. Data type supported: Same as @p input.
      * @param[in] output               Destination tensor info. Data type supported: Same as @p input.
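
An F32 configuration sketch (sizes illustrative; the [num_inputs, num_outputs] weight layout shown here is an assumption for this example, adjust transpose_weights for the other layout):

    Tensor src, weights, bias, dst;
    src.allocator()->init(TensorInfo(TensorShape(128U), 1, DataType::F32));
    weights.allocator()->init(TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)); // 128 inputs -> 64 outputs
    bias.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(64U), 1, DataType::F32));

    NEFullyConnectedLayer fc;
    fc.configure(&src, &weights, &bias, &dst);
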
diff --git a/arm_compute/runtime/NEON/functions/NEGEMM.h b/arm_compute/runtime/NEON/functions/NEGEMM.h
index 5d108b2..cf059e5 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMM.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMM.h
@@ -66,7 +66,7 @@
      * @note GEMM: General Matrix Multiply - [alpha * A * B + beta * C].
      * @note GEMM: The tensors a, b, c, d must have the same data type. You should not mix data types when calling this function.
      *
-     * @param[in]  a         First input tensor  (Matrix A or Vector A). Data type supported: QS8/QS16/F16/F32
+     * @param[in]  a         First input tensor  (Matrix A or Vector A). Data type supported: F16/F32
      * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a
      * @param[in]  c         Third input tensor  (Matrix C). It can be a nullptr if just the multiplication between @p a and @p b is needed. Data type supported: same as @p a
      * @param[out] d         Output tensor. Data type supported: same as @p a
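
The documented GEMM form D = alpha * A * B + beta * C, sketched for F32 (dimensions illustrative; note that TensorShape is (width, height), i.e. (columns, rows)):

    const unsigned int M = 4, N = 8, K = 16;
    Tensor a, b, c, d;
    a.allocator()->init(TensorInfo(TensorShape(K, M), 1, DataType::F32)); // A: M x K
    b.allocator()->init(TensorInfo(TensorShape(N, K), 1, DataType::F32)); // B: K x N
    c.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::F32)); // C: M x N
    d.allocator()->init(TensorInfo(TensorShape(N, M), 1, DataType::F32)); // D: M x N

    NEGEMM gemm;
    gemm.configure(&a, &b, &c, &d, 1.0f /* alpha */, 1.0f /* beta */);
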
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
index 7075bec..68e1145 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMConvolutionLayer.h
@@ -60,7 +60,7 @@
     NEConvolutionLayerReshapeWeights(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
     /** Set the input and output tensors.
      *
-     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QS8/QASYMM8/QS16/F32.
+     * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QASYMM8/F32.
      * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
      * @param[out] output       Destination tensor. Data types supported: Same as @p weights.
      * @param[in]  transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
@@ -69,7 +69,7 @@
     void configure(const ITensor *weights, const ITensor *biases, ITensor *output, bool transpose1xW);
     /** Static function to check if given info will lead to a valid configuration of @ref NEConvolutionLayerReshapeWeights
      *
-     * @param[in] weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QS8/QASYMM8/QS16/F16/F32.
+     * @param[in] weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: QASYMM8/F16/F32.
      * @param[in] biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p weights.
      * @param[in] output       Destination tensor. Data types supported: Same as @p weights.
      * @param[in] transpose1xW True if the weights are to undergo a 1xW transposition after reshaping (in case of GEMM operation), false otherwise.
@@ -116,7 +116,7 @@
      *
      * @param[in]  input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
      *                          while every optional dimension from 4 and above represent a batch of inputs.
-     *                          Data types supported: QS8/QASYMM8/QS16/F32.
+     *                          Data types supported: QASYMM8/F32.
      * @param[in]  weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
      * @param[in]  biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
      *                          Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -134,7 +134,7 @@
      *
      * @param[in] input        Source tensor. 3 lower dimensions represent a single input [width, height, IFM],
      *                         while every optional dimension from 4 and above represent a batch of inputs.
-     *                         Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     *                         Data types supported: QASYMM8/F16/F32.
     * @param[in] weights      Weights tensor. Weights are 4D tensor with dimensions [kernel_x, kernel_y, IFM, OFM]. Data type supported: Same as @p input.
      * @param[in] biases       Biases tensor. Shared biases supported. Biases are 1D tensor with dimensions [OFM].
      *                         Data type supported: Should match @p input data type, except for input of QASYMM8 type where biases should be of S32 type.
@@ -158,7 +158,7 @@
 private:
     /** Configures the appropriate matrix multiply routine
      *
-     * @param[in]  input          Input tensor. Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     * @param[in]  input          Input tensor. Data types supported: QASYMM8/F16/F32.
      * @param[in]  weights        Weights tensor. Data type supported: Same as @p input.
      * @param[out] output         Output tensor. Data types supported: Same as @p input,
      *                            except for input of QASYMM8 type where output should be of S32 type.
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h b/arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h
index b911fd0..4a6bec0 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMInterleave4x4.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -40,7 +40,7 @@
 public:
     /** Initialise the kernel's inputs, output
      *
-     * @param[in]  input  First input tensor. Data types supported: U8/S8/QS8/U16/S16/F16/U32/S32/F32
+     * @param[in]  input  First input tensor. Data types supported: U8/S8/U16/S16/F16/U32/S32/F32
      * @param[out] output Output tensor. Data type supported: same as @p input
      */
     void configure(const ITensor *input, ITensor *output);
diff --git a/arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h b/arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h
index 8b9ad13..3f8e731 100644
--- a/arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h
+++ b/arm_compute/runtime/NEON/functions/NEGEMMTranspose1xW.h
@@ -38,13 +38,13 @@
 public:
     /** Initialise the kernel's inputs, output
      *
-     * @param[in]  input  First input tensor. Data type supported: U8/S8/QS8/U16/S16/F16/U32/S32/F32/
+     * @param[in]  input  First input tensor. Data type supported: U8/S8/U16/S16/F16/U32/S32/F32
      * @param[out] output Output tensor. Data type supported: same as @p input
      */
     void configure(const ITensor *input, ITensor *output);
     /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMTranspose1xW
      *
-     * @param[in] input  First input tensor. Data type supported: U8/S8/QS8/U16/S16/F16/U32/S32/F32/
+     * @param[in] input  First input tensor. Data type supported: U8/S8/U16/S16/F16/U32/S32/F32
      * @param[in] output Output tensor. Data type supported: same as @p input
      *
      * @return a status
diff --git a/arm_compute/runtime/NEON/functions/NEIm2Col.h b/arm_compute/runtime/NEON/functions/NEIm2Col.h
index caa8a01..d888b7e 100644
--- a/arm_compute/runtime/NEON/functions/NEIm2Col.h
+++ b/arm_compute/runtime/NEON/functions/NEIm2Col.h
@@ -43,7 +43,7 @@
     /** Configure the im2col NEON kernel
      *
      * @param[in]  input              The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
-     *                                while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QS16/QASYMM8/F16/F32
+     *                                while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32
      *                                Note: QASYMM8 works only for has_bias = false
      * @param[out] output             The output tensor. Data types supported: Same as @p input
      * @param[in]  kernel_dims        The kernel dimensions (width and height).
@@ -56,7 +56,7 @@
     /** Static function to check if given info will lead to a valid configuration of @ref NEIm2Col
      *
      * @param[in] input              The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
-     *                               while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QS8/QS16/QASYMM8/F16/F32
+     *                               while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8/F16/F32
      *                               Note: QASYMM8 works only for has_bias = false
      * @param[in] output             The output tensor. Data types supported: Same as @p input
      * @param[in] kernel_dims        The kernel dimensions (width and height).
diff --git a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
index 4b5ad28..4f1f32f 100644
--- a/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NENormalizationLayer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -55,7 +55,7 @@
     /** Set the input and output tensors.
      *
      * @param[in]  input     Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
-     *                       and an optional 4th dimension for batch of inputs. Data type supported: QS8/QS16/F16/F32
+     *                       and an optional 4th dimension for batch of inputs. Data type supported: F16/F32
     * @param[out] output    Destination with the same dimensions, data type and number of channels of @p input
      * @param[in]  norm_info Normalization layer information like the normalization type, normalization size and other parameters.
      */
@@ -63,7 +63,7 @@
     /** Static function to check if given info will lead to a valid configuration of @ref NENormalizationLayer
      *
      * @param[in] input     Source tensor. 3 lower dims represent a single input with dimensions [width, height, IFM],
-     *                      and an optional 4th dimension for batch of inputs. Data type supported: QS8/QS16/F16/F32
+     *                      and an optional 4th dimension for batch of inputs. Data type supported: F16/F32
     * @param[in] output    Destination with the same dimensions, data type and number of channels of @p input
      * @param[in] norm_info Normalization layer information like the normalization type, normalization size and other parameters.
      *
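
A cross-map normalisation sketch on F32 (shape and norm size illustrative):

    Tensor src, dst;
    TensorInfo info(TensorShape(28U, 28U, 96U), 1, DataType::F32);
    src.allocator()->init(info);
    dst.allocator()->init(info);

    NENormalizationLayer norm;
    norm.configure(&src, &dst, NormalizationLayerInfo(NormType::CROSS_MAP, 5)); // normalise across 5 feature maps
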
diff --git a/arm_compute/runtime/NEON/functions/NEPermute.h b/arm_compute/runtime/NEON/functions/NEPermute.h
index 58626cd..580d24e 100644
--- a/arm_compute/runtime/NEON/functions/NEPermute.h
+++ b/arm_compute/runtime/NEON/functions/NEPermute.h
@@ -40,7 +40,7 @@
      *
     * @note Supported permutation vectors: [2, 0, 1], [1, 2, 0]
      *
-     * @param[in]  input  The input tensor to permute. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+     * @param[in]  input  The input tensor to permute. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
      * @param[out] output The output tensor. Data types supported: Same as @p input
      * @param[in]  perm   Permutation vector
      */
@@ -49,7 +49,7 @@
      *
     * @note Supported permutation vectors: [2, 0, 1], [1, 2, 0]
      *
-     * @param[in] input  The input tensor to permute. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/QS16/F16/U32/S32/F32
+     * @param[in] input  The input tensor to permute. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
      * @param[in] output The output tensor. Data types supported: Same as @p input
      * @param[in] perm   Permutation vector
      *
diff --git a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
index ba96ae6..371bb2e 100644
--- a/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
+++ b/arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h
@@ -37,22 +37,22 @@
 public:
     /** Initialise the kernel's inputs, output and conversion policy.
      *
-     * @param[in, out] input1          An input tensor. Data types supported: U8/QS8/S16/F16/F32.
+     * @param[in, out] input1          An input tensor. Data types supported: U8/S16/F16/F32.
      *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
      * @param[in, out] input2          An input tensor. Data types supported: same as @p input1.
      *                                 The input tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
-     * @param[out]     output          Output tensor. Data types supported: U8/QS8/S16/F16/F32.
+     * @param[out]     output          Output tensor. Data types supported: U8/S16/F16/F32.
      * @param[in]      scale           Scale to apply after multiplication.
-     *                                 Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15. For QS8 and QS16 scale must be 1.
+     *                                 Scale must be positive and its value must be either 1/255 or 1/2^n where n is between 0 and 15.
      * @param[in]      overflow_policy Overflow policy.
      * @param[in]      rounding_policy Rounding policy.
      */
     void configure(ITensor *input1, ITensor *input2, ITensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
     /** Static function to check if given info will lead to a valid configuration of @ref NEPixelWiseMultiplication
      *
-     * @param[in] input1          First tensor info input. Data types supported: U8/QS8/S16/F16/F32.
-     * @param[in] input2          Second tensor info input. Data types supported: U8/QS8/S16/F16/F32.
-     * @param[in] output          Output tensor info. Data types supported: U8/QS8/S16/F16/F32.
+     * @param[in] input1          First tensor info input. Data types supported: U8/S16/F16/F32.
+     * @param[in] input2          Second tensor info input. Data types supported: U8/S16/F16/F32.
+     * @param[in] output          Output tensor info. Data types supported: U8/S16/F16/F32.
      * @param[in] scale           Scale to apply after multiplication. Must be positive.
      * @param[in] overflow_policy Overflow policy.
      * @param[in] rounding_policy Rounding policy.
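
A U8 sketch using one of the documented scales (1/255; shapes illustrative):

    Tensor in1, in2, out;
    TensorInfo info(TensorShape(64U, 64U), 1, DataType::U8);
    in1.allocator()->init(info);
    in2.allocator()->init(info);
    out.allocator()->init(info);

    NEPixelWiseMultiplication mul;
    mul.configure(&in1, &in2, &out, 1.f / 255.f, ConvertPolicy::SATURATE, RoundingPolicy::TO_NEAREST_UP);
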
diff --git a/arm_compute/runtime/NEON/functions/NEPoolingLayer.h b/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
index 4224f75..26858d5 100644
--- a/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEPoolingLayer.h
@@ -46,18 +46,18 @@
     NEPoolingLayer();
     /** Set the input and output tensors.
      *
-     * @note QS8, QS16 and F16 are supported for pool sizes 2 and 3 only
+     * @note F16 is supported for pool sizes 2 and 3 only
      *
-     * @param[in, out] input     Source tensor. (Written to only when padding != 0) Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     * @param[in, out] input     Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/F16/F32.
      * @param[out]     output    Destination tensor. Data types supported: Same as @p input.
      * @param[in]      pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
      */
     void configure(ITensor *input, ITensor *output, const PoolingLayerInfo &pool_info);
     /** Static function to check if given info will lead to a valid configuration of @ref NEPoolingLayer
      *
-     * @note QS8, QS16 and F16 are supported for pool sizes 2 and 3 only
+     * @note F16 is supported for pool sizes 2 and 3 only
      *
-     * @param[in] input     Source tensor. (Written to only when padding != 0) Data types supported: QS8/QASYMM8/QS16/F16/F32.
+     * @param[in] input     Source tensor. (Written to only when padding != 0) Data types supported: QASYMM8/F16/F32.
      * @param[in] output    Destination tensor. Data types supported: Same as @p input.
      * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
      *
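
A pooling sketch within the documented constraints (F32 here, so any of the listed pool sizes is fine; shapes illustrative):

    // 2x2 max pooling with stride 2 halves each spatial dimension.
    Tensor src, dst;
    src.allocator()->init(TensorInfo(TensorShape(32U, 32U, 8U), 1, DataType::F32));
    dst.allocator()->init(TensorInfo(TensorShape(16U, 16U, 8U), 1, DataType::F32));

    NEPoolingLayer pool;
    pool.configure(&src, &dst, PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)));
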
diff --git a/arm_compute/runtime/NEON/functions/NEReshapeLayer.h b/arm_compute/runtime/NEON/functions/NEReshapeLayer.h
index 0bab534..a77a5f3 100644
--- a/arm_compute/runtime/NEON/functions/NEReshapeLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEReshapeLayer.h
@@ -37,7 +37,7 @@
 public:
     /** Initialise the kernel's inputs and outputs
      *
-     * @param[in]  input  First tensor input. Data type supported: U8/S8/QS8/QASYMM8//U16/S16/QS16/U32/S32/F16/F32
+     * @param[in]  input  First tensor input. Data type supported: U8/S8/QASYMM8/U16/S16/U32/S32/F16/F32
      * @param[out] output Output tensor. Data type supported: Same as @p input
      */
     void configure(const ITensor *input, ITensor *output);
diff --git a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
index 3d981b6..61f4600 100644
--- a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
+++ b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
@@ -51,18 +51,18 @@
     NESoftmaxLayer(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
     /** Set the input and output tensors.
      *
-     * @param[in,out] input  Source tensor. Data types supported: QASYMM8/QS8/QS16/F16/F32. If the width is not a
+     * @param[in,out] input  Source tensor. Data types supported: QASYMM8/F16/F32. If the width is not a
      *                       multiple of the internal processing block size, @ref NEFillBorderKernel replicates the
      *                       last value of each row to the nearest multiple.
      * @param[out]    output Destination tensor. Data types supported: same as @p input.
-     * @param[in]     beta   (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
+     * @param[in]     beta   (Optional) A scaling factor for the exponent.
      */
     void configure(ITensor *input, ITensor *output, float beta = 1.0f);
     /** Static function to check if given info will lead to a valid configuration of @ref NESoftmaxLayer
      *
-     * @param[in] input  Source tensor. Data types supported: QASYMM8/QS8/QS16/F16/F32.
+     * @param[in] input  Source tensor. Data types supported: QASYMM8/F16/F32.
      * @param[in] output Destination tensor. Data types supported: same as @p input
-     * @param[in] beta   (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
+     * @param[in] beta   (Optional) A scaling factor for the exponent.
      *
      * @return a status
      */
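
A softmax sketch on F32 (shape illustrative; beta = 1.0f matches the default):

    Tensor src, dst;
    TensorInfo info(TensorShape(1000U), 1, DataType::F32);
    src.allocator()->init(info);
    dst.allocator()->init(info);

    NESoftmaxLayer softmax;
    softmax.configure(&src, &dst, 1.0f /* beta */);
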
diff --git a/arm_compute/runtime/NEON/functions/NETranspose.h b/arm_compute/runtime/NEON/functions/NETranspose.h
index 6d1e107..0234288 100644
--- a/arm_compute/runtime/NEON/functions/NETranspose.h
+++ b/arm_compute/runtime/NEON/functions/NETranspose.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -41,13 +41,13 @@
 public:
     /** Initialise the kernel's inputs and output
      *
-     * @param[in]  input  Input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/F16/U32/S32/F32
+     * @param[in]  input  Input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
      * @param[out] output Output tensor. Data type supported: Same as @p input
      */
     void configure(const ITensor *input, ITensor *output);
     /** Static function to check if given info will lead to a valid configuration of @ref NETranspose
      *
-     * @param[in] input  The input tensor. Data types supported: U8/S8/QS8/QASYMM8/U16/S16/F16/U32/S32/F32
+     * @param[in] input  The input tensor. Data types supported: U8/S8/QASYMM8/U16/S16/F16/U32/S32/F32
      * @param[in] output The output tensor. Data types supported: Same as @p input
      *
      * @return a status