COMPMID-345: Added support for armv8.2+FP16 in the validation framework.

Change-Id: Ifef2133d4a0da5456bec147330405b6d58cf6a71
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/78676
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
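
For context, the recurring pattern in this patch: float16_t only exists when
the toolchain targets armv8.2-a with the FP16 extension, so every use of the
type is fenced behind the ARM_COMPUTE_ENABLE_FP16 macro (renamed from the old
ENABLE_FP16), and <arm_fp16.h> is included wherever float16_t is named. A
minimal sketch of the pattern, assuming ARM_COMPUTE_ENABLE_FP16 is supplied by
the build system; the helper half_to_float is hypothetical, not library API:

    // Guarded use of float16_t: the type is only available on
    // armv8.2-a+fp16 targets, signalled via ARM_COMPUTE_ENABLE_FP16.
    #if ARM_COMPUTE_ENABLE_FP16
    #include <arm_fp16.h> // provides float16_t

    // half_to_float is a hypothetical helper, not part of the library.
    inline float half_to_float(float16_t value)
    {
        return static_cast<float>(value); // widening, always safe
    }
    #endif
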
diff --git a/tests/validation/NEON/ConvolutionLayer.cpp b/tests/validation/NEON/ConvolutionLayer.cpp
index a1dbe38..ee2b24d 100644
--- a/tests/validation/NEON/ConvolutionLayer.cpp
+++ b/tests/validation/NEON/ConvolutionLayer.cpp
@@ -197,4 +197,4 @@
 BOOST_AUTO_TEST_SUITE_END()
 BOOST_AUTO_TEST_SUITE_END()
 BOOST_AUTO_TEST_SUITE_END()
-#endif
\ No newline at end of file
+#endif
diff --git a/tests/validation/TensorFactory.h b/tests/validation/TensorFactory.h
index 48f9d67..610425b 100644
--- a/tests/validation/TensorFactory.h
+++ b/tests/validation/TensorFactory.h
@@ -30,6 +30,10 @@
 
 #include "boost_wrapper.h"
 
+#if ARM_COMPUTE_ENABLE_FP16
+#include <arm_fp16.h> // needed for float16_t
+#endif
+
 namespace arm_compute
 {
 namespace test
@@ -39,7 +43,7 @@
 using TensorVariant = boost::variant < Tensor<uint8_t>, Tensor<int8_t>,
       Tensor<uint16_t>, Tensor<int16_t>,
       Tensor<uint32_t>, Tensor<int32_t>,
-#ifdef ENABLE_FP16
+#ifdef ARM_COMPUTE_ENABLE_FP16
       Tensor<float16_t>,
 #endif
       Tensor<float >>;
@@ -90,10 +94,10 @@
                 using value_type_s32 = typename match_const<R, int32_t>::type;
                 v                    = Tensor<int32_t>(shape, dt, fixed_point_position, reinterpret_cast<value_type_s32 *>(data));
                 break;
-#ifdef ENABLE_FP16
+#ifdef ARM_COMPUTE_ENABLE_FP16
             case DataType::F16:
                 using value_type_f16 = typename match_const<R, float16_t>::type;
-                v                    = Tensor<float16_t>(raw.shape(), dt, reinterpret_cast<value_type_f16 *>(raw.data()));
+                v                    = Tensor<float16_t>(shape, dt, fixed_point_position, reinterpret_cast<value_type_f16 *>(data));
                 break;
 #endif
             case DataType::F32:
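
Two things happen in TensorFactory.h: the TensorVariant alternative list
conditionally gains a Tensor<float16_t> member, and the DataType::F16 case is
fixed to construct that tensor from the same shape/dt/fixed_point_position/data
arguments the neighbouring cases use (the old raw.shape()/raw.data() names do
not exist in this scope). A self-contained sketch of a variant whose
alternatives depend on a feature flag, assuming Boost.Variant; the Tensor
below is a stand-in for the test framework's type:

    // Sketch: conditionally adding an alternative to a boost::variant,
    // mirroring TensorVariant above. Tensor here is a stand-in type.
    #include <boost/variant.hpp>
    #include <cstdint>
    #ifdef ARM_COMPUTE_ENABLE_FP16
    #include <arm_fp16.h>
    #endif

    template <typename T>
    struct Tensor
    {
        T value{};
    };

    using TensorVariantSketch = boost::variant < Tensor<uint8_t>, Tensor<int32_t>,
    #ifdef ARM_COMPUTE_ENABLE_FP16
          Tensor<float16_t>,
    #endif
          Tensor<float >>;

    int main()
    {
        Tensor<float> t;
        t.value = 3.f;
        TensorVariantSketch v = t; // holds the Tensor<float> alternative
        return boost::get<Tensor<float>>(v).value == 3.f ? 0 : 1;
    }
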
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index 7337924..56cc657 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -49,13 +49,24 @@
 {
 namespace
 {
+template <class T>
+struct is_floating_point
+    : std::integral_constant < bool,
+      std::is_same<float, typename std::remove_cv<T>::type>::value ||
+#if ARM_COMPUTE_ENABLE_FP16
+      std::is_same<float16_t, typename std::remove_cv<T>::type>::value ||
+#endif
+      std::is_same<double, typename std::remove_cv<T>::type>::value || std::is_same<long double, typename std::remove_cv<T>::type>::value >
+{
+};
+
 bool is_valid_pixel(int i, int min, int max)
 {
     return (i >= min && i < max);
 }
 
 // 3D convolution for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int8_t fixed_point_position)
 {
     const int half_width_weights  = width_weights / 2;
@@ -525,7 +536,7 @@
 }
 
 // Matrix multiplication for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void gemm(const Tensor<T> &in1, const Tensor<T> &in2, const Tensor<T> &in3, Tensor<T> &out, float alpha, float beta)
 {
     const int M = out.shape().y();
@@ -609,7 +620,7 @@
     for(int i = 0; i < in1.num_elements(); ++i)
     {
         double val = static_cast<intermediate_type>(in1[i]) * static_cast<intermediate_type>(in2[i]) * static_cast<double>(scale);
-        if(std::is_floating_point<T3>::value)
+        if(is_floating_point<T3>::value)
         {
             out[i] = val;
         }
@@ -705,7 +716,7 @@
 }
 
 // Activation Layer for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
 {
     const T a = static_cast<T>(act_info.a());
@@ -838,7 +849,7 @@
 }
 
 // Batch Normalization Layer for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
 {
     const int cols       = static_cast<int>(in.shape()[0]);
@@ -940,7 +951,7 @@
 }
 
 // Normalization Layer for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void normalization_layer(const Tensor<T> &in, Tensor<T> &out, NormalizationLayerInfo norm_info)
 {
     const uint32_t norm_size = norm_info.norm_size();
@@ -1235,7 +1246,7 @@
                     hstart      = std::max(hstart, 0);
                     wend        = std::min(wend, w_in);
                     hend        = std::min(hend, h_in);
-                    if(std::is_floating_point<T>::value)
+                    if(is_floating_point<T>::value)
                     {
                         for(int y = hstart; y < hend; ++y)
                         {
@@ -1267,7 +1278,7 @@
 }
 
 // Softmax Layer
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
 {
     const int cols       = static_cast<int>(in.shape()[0]);
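
The key change in TensorOperations.h is the local is_floating_point trait.
std::is_floating_point is specified for float, double and long double only, so
it reports false for float16_t, and the enable_if overloads above would never
be selected for half-precision tensors; the local trait simply ORs float16_t
into the test. A sketch of how the trait drives overload selection, assuming
an FP16-capable toolchain when ARM_COMPUTE_ENABLE_FP16 is set:

    // Sketch: SFINAE dispatch on a widened floating-point trait.
    #include <type_traits>
    #if ARM_COMPUTE_ENABLE_FP16
    #include <arm_fp16.h>
    // Note: std::is_floating_point<float16_t>::value is false on the
    // toolchains this targets, which is why the custom trait exists.
    #endif

    template <class T>
    struct is_floating_point
        : std::integral_constant < bool,
          std::is_same<float, typename std::remove_cv<T>::type>::value ||
    #if ARM_COMPUTE_ENABLE_FP16
          std::is_same<float16_t, typename std::remove_cv<T>::type>::value ||
    #endif
          std::is_same<double, typename std::remove_cv<T>::type>::value >
    {
    };

    // Selected for float, double and (when enabled) float16_t.
    template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
    T scale(T value) { return value * static_cast<T>(0.5f); }

    // Selected for integral and fixed-point representations.
    template <typename T, typename std::enable_if<!is_floating_point<T>::value, int>::type * = nullptr>
    T scale(T value) { return value / 2; }

    static_assert(is_floating_point<float>::value, "");
    static_assert(!is_floating_point<int>::value, "");
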
diff --git a/tests/validation/Validation.cpp b/tests/validation/Validation.cpp
index 17dc695..8aada0c 100644
--- a/tests/validation/Validation.cpp
+++ b/tests/validation/Validation.cpp
@@ -40,6 +40,10 @@
 #include <cstdint>
 #include <iomanip>
 
+#if ARM_COMPUTE_ENABLE_FP16
+#include <arm_fp16.h> // needed for float16_t
+#endif
+
 namespace arm_compute
 {
 namespace test
@@ -82,7 +86,7 @@
             return *reinterpret_cast<const uint64_t *>(ptr);
         case DataType::S64:
             return *reinterpret_cast<const int64_t *>(ptr);
-#if ENABLE_FP16
+#if ARM_COMPUTE_ENABLE_FP16
         case DataType::F16:
             return *reinterpret_cast<const float16_t *>(ptr);
 #endif
@@ -384,6 +388,8 @@
 
 void validate(std::vector<unsigned int> classified_labels, std::vector<unsigned int> expected_labels)
 {
+    ARM_COMPUTE_UNUSED(classified_labels);
+    ARM_COMPUTE_UNUSED(expected_labels);
     BOOST_TEST(expected_labels.size() != 0);
     BOOST_TEST(classified_labels.size() == expected_labels.size());
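
In Validation.cpp, beyond the macro rename and the guarded include, the two
ARM_COMPUTE_UNUSED calls plausibly keep the build clean in configurations
where the BOOST_TEST assertions compile away and the parameters would
otherwise trip -Werror=unused-parameter. Such macros are conventionally a
cast to void; a sketch, where UNUSED is a hypothetical stand-in for the
library's ARM_COMPUTE_UNUSED:

    // Sketch of the conventional unused-variable suppression macro.
    #include <vector>

    #define UNUSED(x) (void)(x) // evaluates to nothing, silences the warning

    void validate_sketch(const std::vector<unsigned int> &classified_labels,
                         const std::vector<unsigned int> &expected_labels)
    {
        UNUSED(classified_labels);
        UNUSED(expected_labels);
        // ... assertions on the two vectors follow; in configurations where
        // they expand to nothing, the casts above keep the parameters "used".
    }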