COMPMID-2225: Add interface support for new quantized data types.

Add support for:
- QSYMM8, 8-bit quantized symmetric
- QSYMM8_PER_CHANNEL, 8-bit quantized symmetric with per-channel quantization
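
The new types are carried by the reworked QuantizationInfo, which now stores
vectors of scales/offsets and exposes uniform() for per-layer access, with the
old member quantize()/dequantize() replaced by free functions. A minimal usage
sketch of the new interface (illustrative only; the values below are made up):

    #include "arm_compute/core/QuantizationInfo.h"
    #include <vector>

    using namespace arm_compute;

    // Per-layer symmetric quantization: a single scale, no offset (QSYMM8).
    QuantizationInfo sym_qinfo(0.5f);

    // Per-channel symmetric quantization: one scale per channel (QSYMM8_PER_CHANNEL).
    QuantizationInfo per_channel_qinfo(std::vector<float>{ 0.5f, 0.25f, 0.125f });

    // Asymmetric quantization keeps the scale/offset pair (QASYMM8).
    QuantizationInfo asym_qinfo(0.5f, 10);

    // Kernels that assume per-layer quantization extract a UniformQuantizationInfo.
    UniformQuantizationInfo uqinfo = asym_qinfo.uniform();

    // Free functions replace the old QuantizationInfo::quantize()/dequantize() members.
    uint8_t q   = quantize_qasymm8(1.7f, uqinfo);
    float   dq  = dequantize_qasymm8(q, uqinfo);
    int8_t  qs  = quantize_qsymm8(-0.3f, sym_qinfo);
    float   dqs = dequantize_qsymm8(qs, sym_qinfo);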

Change-Id: I00c4ff98e44af37419470af61419ee95d0de2463
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1236
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/core/CL/CLTypes.h b/arm_compute/core/CL/CLTypes.h
index 4a03cc9..24ae542 100644
--- a/arm_compute/core/CL/CLTypes.h
+++ b/arm_compute/core/CL/CLTypes.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,6 +24,7 @@
 #ifndef __ARM_COMPUTE_CL_TYPES_H__
 #define __ARM_COMPUTE_CL_TYPES_H__
 
+#include "arm_compute/core/CL/ICLArray.h"
 #include "arm_compute/core/GPUTarget.h"
 
 #include <string>
@@ -53,5 +54,23 @@
     size_t      num_cores;   /**< Number of cores */
     size_t      cache_size;  /**< Cache size */
 };
+
+/** OpenCL quantization data */
+struct CLQuantization
+{
+    /** Default Constructor */
+    CLQuantization()
+        : scale(nullptr), offset(nullptr) {};
+    /** Constructor
+     *
+     * @param[in] scale  OpenCL scale array
+     * @param[in] offset OpenCL offset array
+     */
+    CLQuantization(const ICLFloatArray *scale, const ICLInt32Array *offset)
+        : scale(scale), offset(offset) {};
+
+    const ICLFloatArray *scale;  /**< Quantization scale array */
+    const ICLInt32Array *offset; /**< Quantization offset array */
+};
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_CL_TYPES_H__ */
diff --git a/arm_compute/core/CL/ICLTensor.h b/arm_compute/core/CL/ICLTensor.h
index 0f5dba9..094a0c3 100644
--- a/arm_compute/core/CL/ICLTensor.h
+++ b/arm_compute/core/CL/ICLTensor.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,6 +26,8 @@
 
 #include "arm_compute/core/ITensor.h"
 
+#include "arm_compute/core/CL/CLTypes.h"
+
 #include <cstdint>
 
 namespace cl
@@ -53,6 +55,11 @@
     /** Default virtual destructor. */
     virtual ~ICLTensor() = default;
 
+    /** Interface to be implemented by the child class to return the wrapped quantization info data
+     *
+     * @return A wrapped quantization info object.
+     */
+    virtual CLQuantization quantization() const = 0;
     /** Interface to be implemented by the child class to return a reference to the OpenCL buffer containing the image's data.
      *
      * @return A reference to an OpenCL buffer containing the image's data.
diff --git a/arm_compute/core/Helpers.h b/arm_compute/core/Helpers.h
index 235657a..87b1fdf 100644
--- a/arm_compute/core/Helpers.h
+++ b/arm_compute/core/Helpers.h
@@ -158,24 +158,24 @@
  *
  * @return The bilinear interpolated pixel value
  */
-inline uint8_t delta_bilinear_c1_quantized(const uint8_t *pixel_ptr, size_t stride, float dx, float dy, QuantizationInfo iq_info, QuantizationInfo oq_info)
+inline uint8_t delta_bilinear_c1_quantized(const uint8_t *pixel_ptr, size_t stride, float dx, float dy, UniformQuantizationInfo iq_info, UniformQuantizationInfo oq_info)
 {
     ARM_COMPUTE_ERROR_ON(pixel_ptr == nullptr);
 
     const float dx1 = 1.0f - dx;
     const float dy1 = 1.0f - dy;
 
-    const float a00 = iq_info.dequantize(*pixel_ptr);
-    const float a01 = iq_info.dequantize(*(pixel_ptr + 1));
-    const float a10 = iq_info.dequantize(*(pixel_ptr + stride));
-    const float a11 = iq_info.dequantize(*(pixel_ptr + stride + 1));
+    const float a00 = dequantize_qasymm8(*pixel_ptr, iq_info);
+    const float a01 = dequantize_qasymm8(*(pixel_ptr + 1), iq_info);
+    const float a10 = dequantize_qasymm8(*(pixel_ptr + stride), iq_info);
+    const float a11 = dequantize_qasymm8(*(pixel_ptr + stride + 1), iq_info);
 
     const float w1  = dx1 * dy1;
     const float w2  = dx * dy1;
     const float w3  = dx1 * dy;
     const float w4  = dx * dy;
     float       res = a00 * w1 + a01 * w2 + a10 * w3 + a11 * w4;
-    return static_cast<uint8_t>(oq_info.quantize(res, RoundingPolicy::TO_NEAREST_UP));
+    return static_cast<uint8_t>(quantize_qasymm8(res, oq_info));
 }
 
 /** Computes linear interpolation using the pointer to the top pixel and the pixel's distance between
diff --git a/arm_compute/core/NEON/NEAsymm.h b/arm_compute/core/NEON/NEAsymm.h
index 253d0fd..2347c46 100644
--- a/arm_compute/core/NEON/NEAsymm.h
+++ b/arm_compute/core/NEON/NEAsymm.h
@@ -182,7 +182,7 @@
  *
  * @return Dequantized values in a neon vector
  */
-inline float32x4x2_t vdequantize(const uint8x8_t &qv, const QuantizationInfo &qi)
+inline float32x4x2_t vdequantize(const uint8x8_t &qv, const UniformQuantizationInfo &qi)
 {
     const float         scale   = qi.scale;
     const int           offset  = qi.offset;
@@ -205,7 +205,7 @@
  *
  * @return Dequantized values in a neon vector
  */
-inline float32x4x4_t vdequantize(const uint8x16_t &qv, const QuantizationInfo &qi)
+inline float32x4x4_t vdequantize(const uint8x16_t &qv, const UniformQuantizationInfo &qi)
 {
     const float         scale   = qi.scale;
     const int           offset  = qi.offset;
@@ -230,7 +230,7 @@
  *
  * @return A neon vector holding the quantized values
  */
-inline uint8x8_t vquantize(const float32x4x2_t &qv, const QuantizationInfo &qi)
+inline uint8x8_t vquantize(const float32x4x2_t &qv, const UniformQuantizationInfo &qi)
 {
     const float       scale     = qi.scale;
     const int         offset    = qi.offset;
@@ -258,7 +258,7 @@
  *
  * @return A neon vector holding the quantized values
  */
-inline uint8x16_t vquantize(const float32x4x4_t &qv, const QuantizationInfo &qi)
+inline uint8x16_t vquantize(const float32x4x4_t &qv, const UniformQuantizationInfo &qi)
 {
     const float       scale     = qi.scale;
     const int         offset    = qi.offset;
diff --git a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
index 447f488..9381bea 100644
--- a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
@@ -25,7 +25,6 @@
 #define __ARM_COMPUTE_NEACTIVATIONLAYERKERNEL_H__
 
 #include "arm_compute/core/NEON/INEKernel.h"
-#include "arm_compute/core/QAsymm8.h"
 #include "arm_compute/core/utils/misc/Traits.h"
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
diff --git a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
index cbb961f..daa29fd 100644
--- a/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
+++ b/arm_compute/core/NEON/kernels/NEPixelWiseMultiplicationKernel.h
@@ -115,7 +115,7 @@
      *
      */
     using MulFunctionQASYMM8 = void(const void *__restrict input1_ptr, const void *__restrict input2_ptr, void *__restrict output_ptr, float scale,
-                                    const QuantizationInfo &input1_qua_info, const QuantizationInfo &input2_qua_info, const QuantizationInfo &output_qua_info);
+                                    const UniformQuantizationInfo &input1_qua_info, const UniformQuantizationInfo &input2_qua_info, const UniformQuantizationInfo &output_qua_info);
 
     MulFunctionFloat   *_func_float;
     MulFunctionInt     *_func_int;
diff --git a/arm_compute/core/PixelValue.h b/arm_compute/core/PixelValue.h
index 0ead9db..4bdcad6 100644
--- a/arm_compute/core/PixelValue.h
+++ b/arm_compute/core/PixelValue.h
@@ -41,11 +41,11 @@
     }
     /** Initialize the union with a pixel value of chosen datatype
      *
-     * @param[in] v          int value.
-     * @param[in] datatype   DataType that @p v have to be stored
-     * @param[in] quant_info QuantizationInfo to apply in case of QASYMM8 datatype to @p v
+     * @param[in] v        int value.
+     * @param[in] datatype DataType in which @p v has to be stored
+     * @param[in] qinfo    (Optional) QuantizationInfo to apply in case of QASYMM8 datatype to @p v
      */
-    PixelValue(uint64_t v, DataType datatype, QuantizationInfo quant_info = QuantizationInfo())
+    PixelValue(uint64_t v, DataType datatype, QuantizationInfo qinfo = QuantizationInfo())
         : PixelValue()
     {
         switch(datatype)
@@ -57,7 +57,10 @@
                 value.s8 = static_cast<int8_t>(v);
                 break;
             case DataType::QASYMM8:
-                value.u8 = sqcvt_qasymm8_f32(v, quant_info.scale, quant_info.offset);
+                value.u8 = quantize_qasymm8(static_cast<uint8_t>(v), qinfo);
+                break;
+            case DataType::QSYMM8:
+                value.s8 = quantize_qsymm8(static_cast<int8_t>(v), qinfo);
                 break;
             case DataType::U16:
                 value.u16 = static_cast<uint16_t>(v);
diff --git a/arm_compute/core/QAsymm8.h b/arm_compute/core/QAsymm8.h
deleted file mode 100644
index 2fa4029..0000000
--- a/arm_compute/core/QAsymm8.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __ARM_COMPUTE_QASYMM8_H__
-#define __ARM_COMPUTE_QASYMM8_H__
-
-#include "arm_compute/core/Rounding.h"
-#include <cstdint>
-
-namespace arm_compute
-{
-using qasymm8_t = uint8_t; /**< 8 bit quantized asymmetric scalar value */
-}
-#include "arm_compute/core/QAsymm8.inl"
-#endif /* __ARM_COMPUTE_QASYMM8_H__ */
diff --git a/arm_compute/core/QAsymm8.inl b/arm_compute/core/QAsymm8.inl
deleted file mode 100644
index 77109c4..0000000
--- a/arm_compute/core/QAsymm8.inl
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2017-2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <cmath>
-#include <limits>
-
-namespace arm_compute
-{
-#ifndef DOXYGEN_SKIP_THIS
-inline qasymm8_t sqcvt_qasymm8_f32(float value, float scale, int offset, RoundingPolicy rounding_policy = RoundingPolicy::TO_NEAREST_UP)
-{
-    int quantized = arm_compute::round(value / scale, rounding_policy) + offset;
-    quantized     = std::max(0, std::min(quantized, 255));
-    return quantized;
-}
-
-inline float scvt_f32_qasymm8(qasymm8_t value, float scale, int offset)
-{
-    float dequantized = (static_cast<int>(value) - offset) * scale;
-    return dequantized;
-}
-#endif /* DOXYGEN_SKIP_THIS */
-}
diff --git a/arm_compute/core/QuantizationInfo.h b/arm_compute/core/QuantizationInfo.h
new file mode 100644
index 0000000..94f7e76
--- /dev/null
+++ b/arm_compute/core/QuantizationInfo.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_QUANTIZATION_INFO_H__
+#define __ARM_COMPUTE_QUANTIZATION_INFO_H__
+
+#include "arm_compute/core/Rounding.h"
+
+#include <cstddef>
+#include <vector>
+
+namespace arm_compute
+{
+using qasymm8_t = uint8_t; /**< 8 bit quantized asymmetric scalar value */
+using qsymm8_t  = int8_t;  /**< 8 bit quantized symmetric scalar value */
+
+/** Quantization info when assuming per layer quantization */
+struct UniformQuantizationInfo
+{
+    /** Default constructor */
+    UniformQuantizationInfo()
+        : scale(0.f), offset(0)
+    {
+    }
+    /** Constructor
+     *
+     * @param[in] scale  Quantization scale
+     * @param[in] offset Quantization offset
+     */
+    UniformQuantizationInfo(float scale, int32_t offset)
+        : scale(scale), offset(offset)
+    {
+    }
+    /** Checks if the scale and offset are both zero */
+    bool empty() const
+    {
+        return (scale == 0) && (offset == 0);
+    }
+
+    float   scale;
+    int32_t offset;
+};
+
+/** Quantization information */
+struct QuantizationInfo
+{
+    /** Default constructor */
+    QuantizationInfo() noexcept
+        : scale(),
+          offset()
+    {
+    }
+    /** Construct quantization info.
+     *
+     * @note Used for symmetric quantization
+     *
+     * @param[in] scale Scale.
+     */
+    QuantizationInfo(float scale)
+        : scale(1, scale), offset()
+    {
+    }
+    /** Construct quantization info.
+     *
+     * @note Used for asymmetric quantization
+     *
+     * @param[in] scale  Scale.
+     * @param[in] offset Offset.
+     */
+    QuantizationInfo(float scale, int offset)
+        : scale(1, scale), offset(1, offset)
+    {
+    }
+    /** Construct quantization info.
+     *
+     * @note Used for symmetric per channel quantization
+     *
+     * @param[in] scale Scale.
+     */
+    QuantizationInfo(std::vector<float> scale)
+        : scale(scale), offset()
+    {
+    }
+    /** Indicates whether this QuantizationInfo has valid settings or not
+     *
+     * @return True if this object has invalid settings.
+     */
+    bool empty() const
+    {
+        return scale.empty() && offset.empty();
+    }
+    /** Return per layer quantization info
+     *
+     * @return Uniform quantization information; in case of empty information, zero is returned in the respective fields
+     */
+    UniformQuantizationInfo uniform() const
+    {
+        UniformQuantizationInfo uqinfo;
+        uqinfo.scale  = scale.empty() ? 0 : scale[0];
+        uqinfo.offset = offset.empty() ? 0 : offset[0];
+
+        return uqinfo;
+    }
+
+    std::vector<float>   scale;  /**< Vector containing scaling factors */
+    std::vector<int32_t> offset; /**< Vector containing zero offsets */
+};
+
+/** Check whether two quantization info are equal.
+ *
+ * @param[in] lhs LHS quantization info.
+ * @param[in] rhs RHS quantization info.
+ *
+ * @return True if the given quantization info is the same.
+ */
+inline bool operator==(const QuantizationInfo &lhs, const QuantizationInfo &rhs)
+{
+    return (lhs.scale == rhs.scale) && (lhs.offset == rhs.offset);
+}
+
+/** Check whether two quantization info are not equal.
+ *
+ * @param[in] lhs LHS quantization info.
+ * @param[in] rhs RHS quantization info.
+ *
+ * @return True if the given quantization info is not the same.
+ */
+inline bool operator!=(const QuantizationInfo &lhs, const QuantizationInfo &rhs)
+{
+    return !(operator==(lhs, rhs));
+}
+
+/** Check whether two quantization info are equal.
+ *
+ * @param[in] lhs LHS quantization info.
+ * @param[in] rhs RHS quantization info.
+ *
+ * @return True if the given quantization info is the same.
+ */
+inline bool operator==(const UniformQuantizationInfo &lhs, const UniformQuantizationInfo &rhs)
+{
+    return (lhs.scale == rhs.scale) && (lhs.offset == rhs.offset);
+}
+
+/** Check whether two quantization info are not equal.
+ *
+ * @param[in] lhs LHS quantization info.
+ * @param[in] rhs RHS quantization info.
+ *
+ * @return True if the given quantization info is not the same.
+ */
+inline bool operator!=(const UniformQuantizationInfo &lhs, const UniformQuantizationInfo &rhs)
+{
+    return !(operator==(lhs, rhs));
+}
+
+/** Quantize a value given an asymmetric quantization scheme
+ *
+ * @param[in] value           Value to quantize
+ * @param[in] qinfo           Quantization information to use for quantizing
+ * @param[in] rounding_policy (Optional) Rounding policy to use. Default: nearest up
+ *
+ * @return Quantized value
+ */
+inline uint8_t quantize_qasymm8(float value, const UniformQuantizationInfo &qinfo, RoundingPolicy rounding_policy = RoundingPolicy::TO_NEAREST_UP)
+{
+    int quantized = arm_compute::round(value / qinfo.scale, rounding_policy) + qinfo.offset;
+    quantized     = std::max(0, std::min(quantized, 255));
+    return quantized;
+}
+
+/** Quantize a value given an asymmetric quantization scheme
+ *
+ * @param[in] value           Value to quantize
+ * @param[in] qinfo           Quantization information to use for quantizing
+ * @param[in] rounding_policy (Optional) Rounding policy to use. Default: nearest up
+ *
+ * @return Quantized value
+ */
+inline uint8_t quantize_qasymm8(float value, const QuantizationInfo &qinfo, RoundingPolicy rounding_policy = RoundingPolicy::TO_NEAREST_UP)
+{
+    UniformQuantizationInfo uqinfo    = qinfo.uniform();
+    int                     quantized = arm_compute::round(value / uqinfo.scale, rounding_policy) + uqinfo.offset;
+    quantized                         = std::max(0, std::min(quantized, 255));
+    return quantized;
+}
+
+/** Quantize a value given a symmetric quantization scheme
+ *
+ * @param[in] value Value to quantize
+ * @param[in] qinfo Quantization information to use for quantizing
+ *
+ * @return Quantized value
+ */
+inline int8_t quantize_qsymm8(float value, const QuantizationInfo &qinfo)
+{
+    int quantized = arm_compute::round(value / qinfo.uniform().scale, RoundingPolicy::TO_NEAREST_UP);
+    quantized     = std::max(-128, std::min(quantized, 127));
+    return quantized;
+}
+
+/** Dequantize a value given an asymmetric quantization scheme
+ *
+ * @param[in] value Value to dequantize
+ * @param[in] qinfo Quantization information to use for dequantizing
+ *
+ * @return Dequantized value
+ */
+inline float dequantize_qasymm8(uint8_t value, const UniformQuantizationInfo &qinfo)
+{
+    return (static_cast<int>(value) - qinfo.offset) * qinfo.scale;
+}
+
+/** Dequantize a value given an asymmetric quantization scheme
+ *
+ * @param[in] value Value to dequantize
+ * @param[in] qinfo Quantization information to use for dequantizing
+ *
+ * @return Dequantized value
+ */
+inline float dequantize_qasymm8(uint8_t value, const QuantizationInfo &qinfo)
+{
+    UniformQuantizationInfo uqinfo = qinfo.uniform();
+    return (static_cast<int>(value) - uqinfo.offset) * uqinfo.scale;
+}
+
+/** Dequantize a value given a symmetric quantization scheme
+ *
+ * @param[in] value Value to dequantize
+ * @param[in] qinfo Quantization information to use for dequantizing
+ *
+ * @return Dequantized value
+ */
+inline float dequantize_qsymm8(int8_t value, const QuantizationInfo &qinfo)
+{
+    return value * qinfo.uniform().scale;
+}
+} // namespace arm_compute
+#endif /*__ARM_COMPUTE_QUANTIZATION_INFO_H__ */
\ No newline at end of file
diff --git a/arm_compute/core/Types.h b/arm_compute/core/Types.h
index 972d6ef..1787e68 100644
--- a/arm_compute/core/Types.h
+++ b/arm_compute/core/Types.h
@@ -25,8 +25,7 @@
 #define __ARM_COMPUTE_TYPES_H__
 
 #include "arm_compute/core/Coordinates.h"
-#include "arm_compute/core/QAsymm8.h"
-#include "arm_compute/core/Rounding.h"
+#include "arm_compute/core/QuantizationInfo.h"
 #include "arm_compute/core/Size2D.h"
 #include "arm_compute/core/Strides.h"
 #include "arm_compute/core/TensorShape.h"
@@ -73,20 +72,22 @@
 /** Available data types */
 enum class DataType
 {
-    UNKNOWN, /**< Unknown data type */
-    U8,      /**< unsigned 8-bit number */
-    S8,      /**< signed 8-bit number */
-    QASYMM8, /**< quantized, asymmetric fixed-point 8-bit number */
-    U16,     /**< unsigned 16-bit number */
-    S16,     /**< signed 16-bit number */
-    U32,     /**< unsigned 32-bit number */
-    S32,     /**< signed 32-bit number */
-    U64,     /**< unsigned 64-bit number */
-    S64,     /**< signed 64-bit number */
-    F16,     /**< 16-bit floating-point number */
-    F32,     /**< 32-bit floating-point number */
-    F64,     /**< 64-bit floating-point number */
-    SIZET    /**< size_t */
+    UNKNOWN,            /**< Unknown data type */
+    U8,                 /**< unsigned 8-bit number */
+    S8,                 /**< signed 8-bit number */
+    QSYMM8,             /**< quantized, symmetric fixed-point 8-bit number */
+    QASYMM8,            /**< quantized, asymmetric fixed-point 8-bit number */
+    QSYMM8_PER_CHANNEL, /**< quantized, symmetric per channel fixed-point 8-bit number */
+    U16,                /**< unsigned 16-bit number */
+    S16,                /**< signed 16-bit number */
+    U32,                /**< unsigned 32-bit number */
+    S32,                /**< signed 32-bit number */
+    U64,                /**< unsigned 64-bit number */
+    S64,                /**< signed 64-bit number */
+    F16,                /**< 16-bit floating-point number */
+    F32,                /**< 32-bit floating-point number */
+    F64,                /**< 64-bit floating-point number */
+    SIZET               /**< size_t */
 };
 
 /** Available Sampling Policies */
@@ -160,86 +161,6 @@
     LessEqual     /**< Less equal comparison ( \f$ x <= y \f$ ) */
 };
 
-/** Quantization settings (used for QASYMM8 data type) */
-struct QuantizationInfo
-{
-    /** Default constructor */
-    QuantizationInfo() noexcept
-        : scale(0.0f),
-          offset(0)
-    {
-    }
-
-    /** Construct quantization info.
-     *
-     * @param[in] scale  Scale.
-     * @param[in] offset Offset.
-     */
-    QuantizationInfo(float scale, int offset)
-        : scale(scale), offset(offset)
-    {
-    }
-
-    /** Check whether equal to a given quantization info.
-     *
-     * @param[in] other Other quantization info.
-     *
-     * @return True if the given quantization info is the same.
-     */
-    bool operator==(const QuantizationInfo &other) const
-    {
-        return scale == other.scale && offset == other.offset;
-    }
-
-    /** Check whether not equal to a given quantization info.
-     *
-     * @param[in] other Other quantization info.
-     *
-     * @return True if the given quantization info is not the same.
-     */
-    bool operator!=(const QuantizationInfo &other) const
-    {
-        return !(*this == other);
-    }
-
-    float scale;  /**< scale */
-    int   offset; /**< offset */
-
-    /** Quantizes a value using the scale/offset in this QuantizationInfo
-     *
-     * @param[in] value           Value to quantize.
-     * @param[in] rounding_policy Policy to use when rounding.
-     *
-     * @return the quantized value.
-     */
-    qasymm8_t quantize(float value, RoundingPolicy rounding_policy) const
-    {
-        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::quantize: scale == 0");
-        return sqcvt_qasymm8_f32(value, scale, offset, rounding_policy);
-    }
-
-    /** Dequantizes a value using the scale/offset in this QuantizationInfo
-     *
-     * @param[in] value Value to dequantize.
-     *
-     * @return the original value before quantization.
-     */
-    float dequantize(qasymm8_t value) const
-    {
-        ARM_COMPUTE_ERROR_ON_MSG(scale == 0, "QuantizationInfo::dequantize: scale == 0");
-        return scvt_f32_qasymm8(value, scale, offset);
-    }
-
-    /** Indicates whether this QuantizationInfo has valid settings or not
-     *
-     * @return True if the this has invalid settings.
-     */
-    bool empty() const
-    {
-        return scale == 0;
-    }
-};
-
 /** Container for valid region of a window */
 struct ValidRegion
 {
diff --git a/arm_compute/core/Utils.h b/arm_compute/core/Utils.h
index 1de0df6..8630eee 100644
--- a/arm_compute/core/Utils.h
+++ b/arm_compute/core/Utils.h
@@ -111,7 +111,9 @@
     {
         case DataType::U8:
         case DataType::S8:
+        case DataType::QSYMM8:
         case DataType::QASYMM8:
+        case DataType::QSYMM8_PER_CHANNEL:
             return 1;
         case DataType::U16:
         case DataType::S16:
@@ -183,7 +185,9 @@
     {
         case DataType::S8:
         case DataType::U8:
+        case DataType::QSYMM8:
         case DataType::QASYMM8:
+        case DataType::QSYMM8_PER_CHANNEL:
             return 1;
         case DataType::U16:
         case DataType::S16:
@@ -521,7 +525,9 @@
             return DataType::U32;
         case DataType::S16:
             return DataType::S32;
+        case DataType::QSYMM8:
         case DataType::QASYMM8:
+        case DataType::QSYMM8_PER_CHANNEL:
         case DataType::F16:
         case DataType::U32:
         case DataType::S32:
@@ -999,7 +1005,9 @@
 {
     switch(dt)
     {
+        case DataType::QSYMM8:
         case DataType::QASYMM8:
+        case DataType::QSYMM8_PER_CHANNEL:
             return true;
         default:
             return false;
@@ -1059,14 +1067,14 @@
 
 /** Returns true if the value can be represented by the given data type
  *
- * @param[in] val        value to be checked
- * @param[in] dt         data type that is checked
- * @param[in] quant_info quantization info if the data type is QASYMM8
+ * @param[in] val   value to be checked
+ * @param[in] dt    data type that is checked
+ * @param[in] qinfo (Optional) quantization info if the data type is QASYMM8
  *
  * @return true if the data type can hold the value.
  */
 template <typename T>
-bool check_value_range(T val, DataType dt, QuantizationInfo quant_info = QuantizationInfo())
+bool check_value_range(T val, DataType dt, QuantizationInfo qinfo = QuantizationInfo())
 {
     switch(dt)
     {
@@ -1074,8 +1082,8 @@
             return ((static_cast<uint8_t>(val) == val) && val >= std::numeric_limits<uint8_t>::lowest() && val <= std::numeric_limits<uint8_t>::max());
         case DataType::QASYMM8:
         {
-            double min = static_cast<double>(quant_info.dequantize(0));
-            double max = static_cast<double>(quant_info.dequantize(std::numeric_limits<uint8_t>::max()));
+            double min = static_cast<double>(dequantize_qasymm8(0, qinfo));
+            double max = static_cast<double>(dequantize_qasymm8(std::numeric_limits<uint8_t>::max(), qinfo));
             return ((double)val >= min && (double)val <= max);
         }
         case DataType::S8:
diff --git a/arm_compute/runtime/CL/CLSubTensor.h b/arm_compute/runtime/CL/CLSubTensor.h
index 9c37f8b..1625aa5 100644
--- a/arm_compute/runtime/CL/CLSubTensor.h
+++ b/arm_compute/runtime/CL/CLSubTensor.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2018 ARM Limited.
+ * Copyright (c) 2017-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -88,6 +88,7 @@
     ITensorInfo      *info() const override;
     ITensorInfo      *info() override;
     const cl::Buffer &cl_buffer() const override;
+    CLQuantization    quantization() const override;
 
 protected:
     // Inherited methods overridden:
diff --git a/arm_compute/runtime/CL/CLTensor.h b/arm_compute/runtime/CL/CLTensor.h
index c47d2be..65ff4f2 100644
--- a/arm_compute/runtime/CL/CLTensor.h
+++ b/arm_compute/runtime/CL/CLTensor.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -32,6 +32,7 @@
 
 namespace arm_compute
 {
+// Forward declarations
 class ITensorAllocator;
 class ITensorInfo;
 
@@ -66,6 +67,7 @@
     TensorInfo       *info() const override;
     TensorInfo       *info() override;
     const cl::Buffer &cl_buffer() const override;
+    CLQuantization    quantization() const override;
 
 protected:
     // Inherited methods overridden:
diff --git a/arm_compute/runtime/CL/CLTensorAllocator.h b/arm_compute/runtime/CL/CLTensorAllocator.h
index 302bd6d..f942478 100644
--- a/arm_compute/runtime/CL/CLTensorAllocator.h
+++ b/arm_compute/runtime/CL/CLTensorAllocator.h
@@ -24,9 +24,11 @@
 #ifndef __ARM_COMPUTE_CLTENSORALLOCATOR_H__
 #define __ARM_COMPUTE_CLTENSORALLOCATOR_H__
 
+#include "arm_compute/runtime/CL/CLArray.h"
 #include "arm_compute/runtime/CL/CLMemory.h"
 #include "arm_compute/runtime/ITensorAllocator.h"
 
+#include "arm_compute/core/CL/CLTypes.h"
 #include "arm_compute/core/CL/OpenCL.h"
 
 #include <cstdint>
@@ -67,6 +69,11 @@
      * @return pointer to the CL data.
      */
     const cl::Buffer &cl_data() const;
+    /** Wrapped quantization info data accessor
+     *
+     * @return A wrapped quantization info object.
+     */
+    CLQuantization quantization() const;
 
     /** Enqueue a map operation of the allocated buffer on the given queue.
      *
@@ -137,6 +144,8 @@
     CLMemory       _memory;                  /**< OpenCL memory */
     uint8_t       *_mapping;                 /**< Pointer to the CPU mapping of the OpenCL buffer. */
     CLTensor      *_owner;                   /**< Owner of the allocator */
+    CLFloatArray   _scale;                   /**< Quantization scale array */
+    CLInt32Array   _offset;                  /**< Quantization offset array */
 };
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_CLTENSORALLOCATOR_H__ */
diff --git a/arm_compute/runtime/CL/CLTunerTypes.h b/arm_compute/runtime/CL/CLTunerTypes.h
index 7d13b6d..20c026e 100644
--- a/arm_compute/runtime/CL/CLTunerTypes.h
+++ b/arm_compute/runtime/CL/CLTunerTypes.h
@@ -26,6 +26,7 @@
 
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/utils/misc/Utility.h"
+
 #include <map>
 
 namespace arm_compute
diff --git a/arm_compute/runtime/ITensorAllocator.h b/arm_compute/runtime/ITensorAllocator.h
index bb708f0..f829cf2 100644
--- a/arm_compute/runtime/ITensorAllocator.h
+++ b/arm_compute/runtime/ITensorAllocator.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2018 ARM Limited.
+ * Copyright (c) 2016-2019 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -96,5 +96,5 @@
     TensorInfo _info;      /**< Tensor's metadata. */
     size_t     _alignment; /**< Tensor's alignment in bytes */
 };
-}
+} // namespace arm_compute
 #endif /*__ARM_COMPUTE_ITENSORALLOCATOR_H__ */