COMPMID-2308: NEConvolutionLayer: support QUANT8_SYMM_PER_CHANNEL filters

Change-Id: Ic1bf5f0d21ccd525f84213a360f7e199d7f50577
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/2177
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
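
Usage note (illustration only, not part of this patch): a minimal sketch of how the new per-channel quantize helper might be exercised, assuming QuantizationInfo can be constructed from a std::vector<float> of per-channel scales.

#include "arm_compute/core/QuantizationInfo.h"

#include <cstddef>
#include <vector>

int main()
{
    using namespace arm_compute;

    // One scale per output channel of a QSYMM8_PER_CHANNEL filter (example values)
    const QuantizationInfo qinfo(std::vector<float>{ 0.05f, 0.1f, 0.2f });

    const float value = 1.f;
    for(size_t channel_id = 0; channel_id < qinfo.scale().size(); ++channel_id)
    {
        // Each channel is quantized with its own scale and clamped to [-128, 127]
        const int8_t q = quantize_qsymm8_per_channel(value, qinfo, channel_id);
        static_cast<void>(q);
    }
    return 0;
}
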
diff --git a/arm_compute/core/QuantizationInfo.h b/arm_compute/core/QuantizationInfo.h
index 5e6e5b3..949ee66 100644
--- a/arm_compute/core/QuantizationInfo.h
+++ b/arm_compute/core/QuantizationInfo.h
@@ -250,6 +250,36 @@
     return quantized;
 }
 
+/** Quantize a value given an 8-bit symmetric per-channel quantization scheme
+ *
+ * @param[in] value      Value to quantize
+ * @param[in] qinfo      Quantization information to use for quantizing
+ * @param[in] channel_id Channel index into the scale vector of the quantization info
+ *
+ * @return Quantized value
+ */
+inline int8_t quantize_qsymm8_per_channel(float value, const QuantizationInfo &qinfo, size_t channel_id = 0)
+{
+    int quantized = arm_compute::round(value / qinfo.scale()[channel_id], RoundingPolicy::TO_NEAREST_UP);
+    quantized     = std::max(-128, std::min(quantized, 127));
+    return quantized;
+}
+
+/** Quantize a value given an 8-bit asymmetric per-channel quantization scheme
+ *
+ * @param[in] value      Value to quantize
+ * @param[in] qinfo      Quantization information to use for quantizing
+ * @param[in] channel_id Channel index into the scale vector of the quantization info
+ *
+ * @return Quantized value
+ */
+inline uint8_t quantize_qasymm8_per_channel(float value, const QuantizationInfo &qinfo, size_t channel_id = 0)
+{
+    int quantized = arm_compute::round(value / qinfo.scale()[channel_id], RoundingPolicy::TO_NEAREST_UP);
+    quantized     = std::max(0, std::min(quantized, 255));
+    return quantized;
+}
+
 /** Dequantize a value given a 8-bit asymmetric quantization scheme
  *
  * @param[in] value Value to dequantize