COMPMID-421: Fixed a problem in Convolution Layer reference values for FP16.

std::numeric_limits is not specialized for float16_t, so all of its members fall back to the primary template and return 0 (the value-initialized default). The FP16 fill distribution in TensorLibrary.h was therefore constructed over [0, 0) and filled every half-precision tensor with zeros. The fill range is now given explicitly, F16 tensors are filled with the same [-1, 1) distribution as F32, and the F16 convolution test validates against its own tolerance instead of the F32 one.

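For reference, a minimal sketch of the failure mode, assuming an ARM toolchain where float16_t comes from <arm_neon.h> and std::numeric_limits has no specialization for it:

    #include <arm_neon.h> // defines float16_t on ARM toolchains (assumption)
    #include <iostream>
    #include <limits>
    #include <random>

    int main()
    {
        // With no specialization, the primary std::numeric_limits template
        // is used for float16_t: every member returns a value-initialized
        // result, i.e. 0.
        std::cout << static_cast<float>(std::numeric_limits<float16_t>::lowest()) << '\n'; // prints 0
        std::cout << static_cast<float>(std::numeric_limits<float16_t>::max()) << '\n';    // prints 0

        // The FP16 fill distribution therefore collapsed to [0, 0):
        std::mt19937 gen(0);
        std::uniform_real_distribution<float> dist(0.f, 0.f);
        std::cout << dist(gen) << '\n'; // always 0 -> all-zero FP16 test tensors
    }

The fix below pins the FP16 distribution to an explicit [-1000, 1000) range rather than querying numeric_limits.
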
Change-Id: I2289e01853e1b2c38afdec119ef6fc8af8a9752e
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/79312
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Moritz Pflanzer <moritz.pflanzer@arm.com>
diff --git a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.cpp b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.cpp
index dcfbb13..1db0257 100644
--- a/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.cpp
+++ b/src/core/NEON/kernels/NEGEMMMatrixMultiplyKernel.cpp
@@ -639,6 +639,7 @@
 void matrix_matrix_multiply_f16(const ITensor *input0, const ITensor *input1, ITensor *output, const Window &window, float alpha)
 {
 #ifdef ARM_COMPUTE_ENABLE_FP16
+
     const size_t in_b_stride = input1->info()->strides_in_bytes()[1] / data_size_from_type(input1->info()->data_type());
     const size_t out_stride  = output->info()->strides_in_bytes()[1] / data_size_from_type(output->info()->data_type());
 
diff --git a/tests/TensorLibrary.h b/tests/TensorLibrary.h
index b05302a..4d7143a 100644
--- a/tests/TensorLibrary.h
+++ b/tests/TensorLibrary.h
@@ -501,7 +501,7 @@
 #if ARM_COMPUTE_ENABLE_FP16
         case DataType::F16:
         {
-            std::uniform_real_distribution<float> distribution_f16(std::numeric_limits<float16_t>::lowest(), std::numeric_limits<float16_t>::max());
+            std::uniform_real_distribution<float> distribution_f16(-1000.f, 1000.f);
             fill(tensor, distribution_f16, seed_offset);
             break;
         }
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index 40c1e2e..d81834b 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -42,7 +42,10 @@
 namespace
 {
 const float tolerance_f32 = 1e-03f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-const float tolerance_qs8 = 3.0f;   /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */
+#ifdef ARM_COMPUTE_ENABLE_FP16
+const float tolerance_f16 = 0.01f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+#endif
+const float tolerance_qs8 = 3.0f; /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */
 
 Tensor compute_convolution_layer(const TensorShape &input_shape, const TensorShape &weights_shape, const TensorShape &bias_shape, const TensorShape &output_shape, DataType dt,
                                  const PadStrideInfo &conv_info, int fixed_point_position)
@@ -69,7 +72,7 @@
     BOOST_TEST(!dst.info()->is_resizable());
 
     // Fill tensors
-    if(dt == DataType::F32)
+    if(dt == DataType::F16 || dt == DataType::F32)
     {
         std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
         library->fill(NEAccessor(src), distribution, 0);
@@ -144,7 +147,7 @@
     RawTensor ref_dst = Reference::compute_reference_convolution_layer(conv_set.src_shape, conv_set.weights_shape, conv_set.bias_shape, conv_set.dst_shape, dt, conv_set.info, 0);
 
     // Validate output
-    validate(NEAccessor(dst), ref_dst, tolerance_f32);
+    validate(NEAccessor(dst), ref_dst, tolerance_f16);
 }
 BOOST_AUTO_TEST_SUITE_END()
 #endif
diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp
index 2388fb6..0518819 100644
--- a/tests/validation/Reference.cpp
+++ b/tests/validation/Reference.cpp
@@ -506,7 +506,7 @@
     RawTensor ref_dst     = library->get(output_shape, dt, 1, fixed_point_position);
 
     // Fill reference
-    if(dt == DataType::F32)
+    if(dt == DataType::F16 || dt == DataType::F32)
     {
         std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
         library->fill(ref_src, distribution, 0);