COMPMID-345: Added support for armv8.2+ FP16 in the validation framework.
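
Why a local trait: std::is_floating_point<float16_t> is false on the
toolchains we target (float16_t is an Arm C language extension type, not a
standard floating-point type), so the enable_if guards below would otherwise
reject half-precision tensors from the floating-point reference paths. A
minimal sketch of what the new trait accepts; the static_asserts are
illustrative only and assume float16_t is the Armv8.2 half-precision type
(e.g. from <arm_fp16.h>), they are not part of this change:

    static_assert(is_floating_point<float>::value, "accepted");
    static_assert(is_floating_point<const double>::value, "cv-qualifiers are stripped");
    #if ARM_COMPUTE_ENABLE_FP16
    static_assert(is_floating_point<float16_t>::value, "accepted only when FP16 is enabled");
    #endif
    static_assert(!is_floating_point<int8_t>::value, "integral types keep taking the fixed-point overloads");

The trait lives in the header's anonymous namespace, so each reference
implementation only swaps std::is_floating_point for is_floating_point in
its enable_if guard; no core arm_compute headers are touched.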

Change-Id: Ifef2133d4a0da5456bec147330405b6d58cf6a71
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/78676
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index 7337924..56cc657 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -49,13 +49,24 @@
 {
 namespace
 {
+template <class T>
+struct is_floating_point
+    : std::integral_constant < bool,
+      std::is_same<float, typename std::remove_cv<T>::type>::value ||
+#if ARM_COMPUTE_ENABLE_FP16
+      std::is_same<float16_t, typename std::remove_cv<T>::type>::value ||
+#endif
+      std::is_same<double, typename std::remove_cv<T>::type>::value || std::is_same<long double, typename std::remove_cv<T>::type>::value >
+{
+};
+
 bool is_valid_pixel(int i, int min, int max)
 {
     return (i >= min && i < max);
 }
 
 // 3D convolution for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void convolution3d(const T *in, const T *weights, const T *bias, T *out, int xi, int yi, int width_in, int height_in, int depth_in, int width_weights, int height_weights, int8_t fixed_point_position)
 {
     const int half_width_weights  = width_weights / 2;
@@ -525,7 +536,7 @@
 }
 
 // Matrix multiplication for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void gemm(const Tensor<T> &in1, const Tensor<T> &in2, const Tensor<T> &in3, Tensor<T> &out, float alpha, float beta)
 {
     const int M = out.shape().y();
@@ -609,7 +620,7 @@
     for(int i = 0; i < in1.num_elements(); ++i)
     {
         double val = static_cast<intermediate_type>(in1[i]) * static_cast<intermediate_type>(in2[i]) * static_cast<double>(scale);
-        if(std::is_floating_point<T3>::value)
+        if(is_floating_point<T3>::value)
         {
             out[i] = val;
         }
@@ -705,7 +716,7 @@
 }
 
 // Activation Layer for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void activation_layer(const Tensor<T> &in, Tensor<T> &out, ActivationLayerInfo act_info)
 {
     const T a = static_cast<T>(act_info.a());
@@ -838,7 +849,7 @@
 }
 
 // Batch Normalization Layer for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
 {
     const int cols       = static_cast<int>(in.shape()[0]);
@@ -940,7 +951,7 @@
 }
 
 // Normalization Layer for floating point type
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void normalization_layer(const Tensor<T> &in, Tensor<T> &out, NormalizationLayerInfo norm_info)
 {
     const uint32_t norm_size = norm_info.norm_size();
@@ -1235,7 +1246,7 @@
                     hstart      = std::max(hstart, 0);
                     wend        = std::min(wend, w_in);
                     hend        = std::min(hend, h_in);
-                    if(std::is_floating_point<T>::value)
+                    if(is_floating_point<T>::value)
                     {
                         for(int y = hstart; y < hend; ++y)
                         {
@@ -1267,7 +1278,7 @@
 }
 
 // Softmax Layer
-template <typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type * = nullptr>
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
 void softmax_layer(const Tensor<T> &in, Tensor<T> &out)
 {
     const int cols       = static_cast<int>(in.shape()[0]);