COMPMID-556: Fixes bias in CLDirectConvolutionLayer to be int32.

Biases were incorrectly passed as uchar8 in the asymmetric quantized
calculations, while they should be int32.
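
For context, a minimal sketch of the asymmetric quantized path (hypothetical
code, not part of this patch; the function name and parameters are
illustrative only): the multiply-accumulate runs in a 32-bit accumulator, so
the bias is an int32 value added to that accumulator before requantization,
and a uint8 bias cannot represent it.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Hypothetical helper: one output element of a QASYMM8 convolution.
    // 'input_offset' and 'weights_offset' are the negated quantization
    // offsets, following the reference implementation's convention.
    uint8_t quantized_mac_with_bias(const uint8_t *in, const uint8_t *w, int32_t bias, int n,
                                    int input_offset, int weights_offset,
                                    float input_scale, float weights_scale,
                                    int output_offset, float output_scale)
    {
        // Accumulate in int32: the offset-corrected products can be negative
        // and their sum overflows an 8-bit type almost immediately.
        int32_t acc = 0;
        for(int i = 0; i < n; ++i)
        {
            acc += (static_cast<int32_t>(in[i]) + input_offset) * (static_cast<int32_t>(w[i]) + weights_offset);
        }

        // The bias lives in the accumulator's domain (scale = input_scale *
        // weights_scale, zero offset), hence int32 rather than uint8.
        acc += bias;

        // Requantize to the output's uint8 representation.
        const float   multiplier = input_scale * weights_scale / output_scale;
        const int32_t result     = static_cast<int32_t>(std::lround(acc * multiplier)) + output_offset;
        return static_cast<uint8_t>(std::min<int32_t>(255, std::max<int32_t>(0, result)));
    }

This mirrors why the reference below now takes the bias as
SimpleTensor<int32_t> in the QASYMM8 specialization.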

Change-Id: I461f5e4ef6eb44374c1094378a1bfe9ade5e247d
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/96244
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/tests/validation/CPP/ConvolutionLayer.cpp b/tests/validation/CPP/ConvolutionLayer.cpp
index aa73869..95852b0 100644
--- a/tests/validation/CPP/ConvolutionLayer.cpp
+++ b/tests/validation/CPP/ConvolutionLayer.cpp
@@ -48,8 +48,8 @@
 }
 
 // 3D convolution for floating point type
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
-void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<T> &bias, SimpleTensor<T> &out,
+template <typename T, typename TB, typename std::enable_if<is_floating_point<T>::value && is_floating_point<TB>::value, int>::type = 0>
+void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &out,
                    int i_offset, int w_offset, int b_offset, int o_offset,
                    int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights)
 {
@@ -95,8 +95,8 @@
 }
 
 // 3D convolution for fixed point type
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
-void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<T> &bias, SimpleTensor<T> &out,
+template <typename T, typename TB, typename std::enable_if<std::is_integral<T>::value && std::is_integral<TB>::value, int>::type = 0>
+void convolution3d(const SimpleTensor<T> &in, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, SimpleTensor<T> &out,
                    int i_offset, int w_offset, int b_offset, int o_offset,
                    int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights)
 {
@@ -152,13 +152,13 @@
 
 // 3D convolution for QASYMM8 type
 template <>
-void convolution3d(const SimpleTensor<uint8_t> &in, const SimpleTensor<uint8_t> &weights, const SimpleTensor<uint8_t> &bias, SimpleTensor<uint8_t> &out,
+void convolution3d(const SimpleTensor<uint8_t> &in, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, SimpleTensor<uint8_t> &out,
                    int i_offset, int w_offset, int b_offset, int o_offset,
                    int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights)
 {
     const uint8_t *in_ptr  = in.data() + i_offset;
     const uint8_t *w_ptr   = weights.data() + w_offset;
-    const uint8_t *b_ptr   = bias.data() + b_offset;
+    const int32_t *b_ptr   = bias.data() + b_offset;
     uint8_t       *out_ptr = out.data() + o_offset;
 
     const int   input_offset   = -in.quantization_info().offset;
@@ -218,8 +218,8 @@
 }
 } // namespace
 
-template <typename T>
-SimpleTensor<T> convolution_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<T> &bias, const TensorShape &output_shape, const PadStrideInfo &info)
+template <typename T, typename TB>
+SimpleTensor<T> convolution_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &weights, const SimpleTensor<TB> &bias, const TensorShape &output_shape, const PadStrideInfo &info)
 {
     // Create reference
     SimpleTensor<T> dst{ output_shape, src.data_type(), 1, src.fixed_point_position(), src.quantization_info() };
@@ -286,7 +286,7 @@
                                                  const PadStrideInfo &info);
 template SimpleTensor<qint16_t> convolution_layer(const SimpleTensor<qint16_t> &src, const SimpleTensor<qint16_t> &weights, const SimpleTensor<qint16_t> &bias, const TensorShape &output_shape,
                                                   const PadStrideInfo &info);
-template SimpleTensor<uint8_t> convolution_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<uint8_t> &bias, const TensorShape &output_shape,
+template SimpleTensor<uint8_t> convolution_layer(const SimpleTensor<uint8_t> &src, const SimpleTensor<uint8_t> &weights, const SimpleTensor<int32_t> &bias, const TensorShape &output_shape,
                                                  const PadStrideInfo &info);
 } // namespace reference
 } // namespace validation