COMPMID-1632 Add CLL2NormalizationLayer for NHWC and FP32

Change-Id: Iae22554d5fe893fd22a000eab5bfd8275ea06eb3
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/154102
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: bsgcomp <bsgcomp@arm.com>
diff --git a/tests/validation/CL/L2NormalizeLayer.cpp b/tests/validation/CL/L2NormalizeLayer.cpp
index 3d121b0..517ba84 100644
--- a/tests/validation/CL/L2NormalizeLayer.cpp
+++ b/tests/validation/CL/L2NormalizeLayer.cpp
@@ -44,6 +44,10 @@
 {
 /** Tolerance for float operations */
 constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f);
+constexpr AbsoluteTolerance<float> tolerance_f16(0.01f);
+
+auto data = concat(combine(framework::dataset::make("DataLayout", { DataLayout::NCHW }), framework::dataset::make("Axis", { 0 })), combine(framework::dataset::make("DataLayout", { DataLayout::NHWC }),
+                   framework::dataset::make("Axis", { 1 })));
 
 } // namespace
 
@@ -58,7 +62,7 @@
                                              TensorInfo(TensorShape(128U, 64U), 2, DataType::F32), // Number of Input channels != 1
                                              TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != F32
                                              TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis >= num_max_dimensions
-                                             TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis > 0
+                                             TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis > 3
                                              TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)
                                            }),
     framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(128U, 64U), 1, DataType::F16),
@@ -69,7 +73,7 @@
                                              TensorInfo(TensorShape(128U, 64U), 1, DataType::F32),
                                              TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)
                                            })),
-    framework::dataset::make("Axis",       { 0U, 0U, 0U, 0U, static_cast<unsigned int>(TensorShape::num_max_dimensions), 1U, 0U })),
+    framework::dataset::make("Axis",       { 0U, 0U, 0U, 0U, static_cast<unsigned int>(TensorShape::num_max_dimensions), 4U, 0U })),
     framework::dataset::make("Expected",   { false, false, false, false, false, false, true })),
     input_info, output_info, axis, expected)
 {
@@ -87,22 +91,36 @@
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLL2NormalizeLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), framework::dataset::make("Epsilon", { 1e-12 })))
+                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), data), framework::dataset::make("Epsilon", { 1e-12 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, CLL2NormalizeLayerFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), framework::dataset::make("Epsilon", { 1e-12 })))
+                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), data), framework::dataset::make("Epsilon", { 1e-12 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // FP32
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLL2NormalizeLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)), data), framework::dataset::make("Epsilon", { 1e-6 })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLL2NormalizeLayerFixture<half>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F16)), data), framework::dataset::make("Epsilon", { 1e-6 })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END() // FP16
+TEST_SUITE_END() // Float
 
-TEST_SUITE_END()
-TEST_SUITE_END()
+TEST_SUITE_END() // L2NormalizeLayer
+TEST_SUITE_END() // CL
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
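For context: the new `data` dataset pairs DataLayout::NCHW with Axis 0 and DataLayout::NHWC with Axis 1. With dimension 0 being the innermost dimension of the buffer, an NCHW tensor is indexed [W, H, C, N] and an NHWC tensor [C, W, H, N], so both pairs exercise normalization along the logical width. A minimal sketch of that correspondence (illustrative only; `Layout` and `dim_index` are made up for this note, not library API):

    #include <array>
    #include <cassert>

    enum class Layout { NCHW, NHWC };

    // Buffer dimension index of a "logical" NCHW-style axis (0 = W, 1 = H, 2 = C),
    // assuming dimension 0 is the innermost dimension of the buffer:
    // NCHW buffers are indexed [W, H, C, N], NHWC buffers [C, W, H, N].
    int dim_index(Layout layout, int logical_axis)
    {
        static const std::array<int, 3> nchw{ { 0, 1, 2 } };
        static const std::array<int, 3> nhwc{ { 1, 2, 0 } };
        assert(logical_axis >= 0 && logical_axis < 3);
        return (layout == Layout::NCHW) ? nchw[logical_axis] : nhwc[logical_axis];
    }

    // dim_index(Layout::NCHW, 0) == 0 and dim_index(Layout::NHWC, 0) == 1,
    // matching the (NCHW, Axis 0) and (NHWC, Axis 1) pairs above.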
diff --git a/tests/validation/CL/ReductionOperation.cpp b/tests/validation/CL/ReductionOperation.cpp
index 516a134..2adb4e9 100644
--- a/tests/validation/CL/ReductionOperation.cpp
+++ b/tests/validation/CL/ReductionOperation.cpp
@@ -58,16 +58,16 @@
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     framework::dataset::make("InputInfo",          { TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Mismatching data type input/output
                                                      TensorInfo(TensorShape(128U, 64U), 2, DataType::F32), // Number of Input channels != 1
-                                                     TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != F16/F32
+                                                     TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != QASYMM8/F16/F32
                                                      TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis >= num_max_dimensions
-                                                     TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis > 0 and SUM_SQUARE
+                                                     TensorInfo(TensorShape(128U, 64U), 1, DataType::QASYMM8), // Axis == 0 and SUM_SQUARE and QASYMM8
                                                      TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)
                                                    }),
     framework::dataset::make("OutputInfo",         { TensorInfo(TensorShape(1U, 64U), 1, DataType::F16),
                                                      TensorInfo(TensorShape(1U, 64U), 1, DataType::F32),
                                                      TensorInfo(TensorShape(1U, 64U), 1, DataType::S16),
                                                      TensorInfo(TensorShape(1U, 64U), 1, DataType::F32),
-                                                     TensorInfo(TensorShape(1U, 64U), 1, DataType::F32),
+                                                     TensorInfo(TensorShape(1U, 64U), 1, DataType::QASYMM8),
                                                      TensorInfo(TensorShape(1U, 64U), 1, DataType::F32)
                                                    })),
     framework::dataset::make("Axis",               { 0U, 0U, 0U, static_cast<unsigned int>(TensorShape::num_max_dimensions), 1U, 0U })),
diff --git a/tests/validation/NEON/L2NormalizeLayer.cpp b/tests/validation/NEON/L2NormalizeLayer.cpp
index f868ade..0a1ddba 100644
--- a/tests/validation/NEON/L2NormalizeLayer.cpp
+++ b/tests/validation/NEON/L2NormalizeLayer.cpp
@@ -85,14 +85,18 @@
 
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEL2NormalizeLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), framework::dataset::make("Epsilon", { 1e-12 })))
+                       combine(combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+                                       framework::dataset::make("Axis", { 0 })),
+                               framework::dataset::make("Epsilon", { 1e-12 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge, NEL2NormalizeLayerFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), framework::dataset::make("Epsilon", { 1e-12 })))
+                       combine(combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+                                       framework::dataset::make("Axis", { 0 })),
+                               framework::dataset::make("Epsilon", { 1e-12 })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
diff --git a/tests/validation/fixtures/L2NormalizeLayerFixture.h b/tests/validation/fixtures/L2NormalizeLayerFixture.h
index 6f11dcb..097d1c4 100644
--- a/tests/validation/fixtures/L2NormalizeLayerFixture.h
+++ b/tests/validation/fixtures/L2NormalizeLayerFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -45,10 +45,10 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape, DataType data_type, unsigned int axis, float epsilon)
+    void setup(TensorShape shape, DataType data_type, DataLayout data_layout, unsigned int axis, float epsilon)
     {
-        _target    = compute_target(shape, data_type, axis, epsilon);
-        _reference = compute_reference(shape, data_type, axis, epsilon);
+        _target    = compute_target(shape, data_type, data_layout, axis, epsilon);
+        _reference = compute_reference(shape, data_type, data_layout, axis, epsilon);
     }
 
 protected:
@@ -58,11 +58,16 @@
         library->fill_tensor_uniform(tensor, 0);
     }
 
-    TensorType compute_target(const TensorShape &shape, DataType data_type, unsigned int axis, float epsilon)
+    TensorType compute_target(TensorShape shape, DataType data_type, DataLayout data_layout, unsigned int axis, float epsilon)
     {
+        if(data_layout == DataLayout::NHWC)
+        {
+            permute(shape, PermutationVector(2U, 0U, 1U));
+        }
+
         // Create tensors
-        TensorType src = create_tensor<TensorType>(shape, data_type);
-        TensorType dst = create_tensor<TensorType>(shape, data_type);
+        TensorType src = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(), data_layout);
+        TensorType dst = create_tensor<TensorType>(shape, data_type, 1, QuantizationInfo(), data_layout);
 
         // Create and configure function
         FunctionType l2_norm_func;
@@ -87,8 +92,25 @@
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, unsigned int axis, float epsilon)
+    SimpleTensor<T> compute_reference(const TensorShape &shape, DataType data_type, DataLayout data_layout, unsigned int axis, float epsilon)
     {
+        if(data_layout == DataLayout::NHWC)
+        {
+            switch(axis)
+            {
+                case 0:
+                    axis = 2;
+                    break;
+                case 1:
+                    axis = 0;
+                    break;
+                case 2:
+                    axis = 1;
+                    break;
+                default:
+                    break;
+            }
+        }
         // Create reference
         SimpleTensor<T> src{ shape, data_type };
 
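Note on the fixture: only the target's shape is permuted for NHWC; the reference still runs on the original NCHW-ordered shape, which is why the dataset axis is translated back by the switch above. A small sketch of the shape permutation applied via PermutationVector(2U, 0U, 1U), assuming the permute helper writes dst[i] = src[perm[i]]:

    #include <array>
    #include <cstddef>

    // [W, H, C] -> [C, W, H], i.e. dst[i] = src[perm[i]] with perm = { 2, 0, 1 }.
    std::array<unsigned int, 3> to_nhwc(const std::array<unsigned int, 3> &whc)
    {
        const std::array<std::size_t, 3> perm{ { 2, 0, 1 } };
        std::array<unsigned int, 3>      out{};
        for(std::size_t i = 0; i < out.size(); ++i)
        {
            out[i] = whc[perm[i]];
        }
        return out;
    }

    // Example: a 128x64x16 (W x H x C) shape becomes { 16, 128, 64 } = [C, W, H].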
diff --git a/tests/validation/reference/L2NormalizeLayer.cpp b/tests/validation/reference/L2NormalizeLayer.cpp
index 99f4e8a..2667751 100644
--- a/tests/validation/reference/L2NormalizeLayer.cpp
+++ b/tests/validation/reference/L2NormalizeLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -66,7 +66,7 @@
         {
             const T *src_row_ptr         = src.data() + du * elems;
             T       *dst_row_ptr         = dst.data() + du * elems;
-            const T  normalization_value = std::sqrt(std::max(sum[du], epsilon));
+            const T  normalization_value = sqrt(std::max(sum[du], static_cast<T>(epsilon)));
             std::transform(src_row_ptr, src_row_ptr + elems, dst_row_ptr, [normalization_value](T val)
             {
                 return val / normalization_value;
@@ -82,6 +82,7 @@
 }
 
 template SimpleTensor<float> l2_normalize(const SimpleTensor<float> &src, unsigned int axis, float epsilon);
+template SimpleTensor<half> l2_normalize(const SimpleTensor<half> &src, unsigned int axis, float epsilon);
 } // namespace reference
 } // namespace validation
 } // namespace test
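For reference, the per-slice computation here is out[i] = src[i] / sqrt(max(sum_j src[j]^2, epsilon)). Casting epsilon to T keeps std::max well-formed for T = half, and the unqualified sqrt call presumably lets a half-precision overload be found by argument-dependent lookup rather than forcing the computation through std::sqrt.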
diff --git a/tests/validation/reference/ReductionOperation.cpp b/tests/validation/reference/ReductionOperation.cpp
index 499263f..2f103a6 100644
--- a/tests/validation/reference/ReductionOperation.cpp
+++ b/tests/validation/reference/ReductionOperation.cpp
@@ -39,36 +39,39 @@
 namespace
 {
 template <typename T>
-struct square
+T reduce_operation(T *ptr, int reduce_elements, ReductionOperation op, int stride)
 {
-    T operator()(const T &lhs, const T &rhs) const
-    {
-        return (lhs + rhs * rhs);
-    }
-};
+    using type = typename std::remove_cv<T>::type;
+    auto res   = type(0);
 
-template <typename T>
-struct sum
-{
-    T operator()(const T &lhs, const T &rhs) const
+    if(std::is_integral<type>::value)
     {
-        return (lhs + rhs);
+        uint32_t int_res = 0;
+        for(int i = 0; i < reduce_elements; ++i)
+        {
+            auto elem = static_cast<uint32_t>(*(ptr + stride * i));
+            int_res += (op == ReductionOperation::SUM_SQUARE) ? elem * elem : elem;
+        }
+        if(op == ReductionOperation::MEAN_SUM && reduce_elements > 0)
+        {
+            int_res /= reduce_elements;
+        }
+        res = saturate_cast<type>(int_res);
     }
-};
+    else
+    {
+        for(int i = 0; i < reduce_elements; ++i)
+        {
+            auto elem = *(ptr + stride * i);
+            res += (op == ReductionOperation::SUM_SQUARE) ? elem * elem : elem;
+        }
+        if(op == ReductionOperation::MEAN_SUM && reduce_elements > 0)
+        {
+            res /= reduce_elements;
+        }
+    }
 
-template <typename T>
-T reduce_operation(T *ptr, int reduce_elements, ReductionOperation op)
-{
-    switch(op)
-    {
-        case ReductionOperation::SUM_SQUARE:
-            return std::accumulate(ptr, ptr + reduce_elements, static_cast<T>(0), square<T>());
-        case ReductionOperation::SUM:
-        case ReductionOperation::MEAN_SUM:
-            return std::accumulate(ptr, ptr + reduce_elements, static_cast<T>(0), sum<T>());
-        default:
-            ARM_COMPUTE_ERROR("Unsupported reduction operation");
-    }
+    return res;
 }
 } // namespace
 
@@ -77,44 +80,22 @@
 {
     // Create reference
     SimpleTensor<T>    dst{ dst_shape, src.data_type(), 1, src.quantization_info() };
-    const unsigned int src_width  = src.shape().x();
-    const unsigned int src_height = src.shape().y();
-    const unsigned int src_depth  = src.shape().z();
-    const unsigned int src_batch  = src.shape()[3];
-    const bool         mean       = op == ReductionOperation::MEAN_SUM;
+    const unsigned int src_width    = src.shape().x();
+    const unsigned int src_height   = src.shape().y();
+    const unsigned int src_depth    = src.shape().z();
+    const unsigned int src_batch    = src.shape()[3];
+    const int          reduce_elems = src.shape()[axis];
 
     switch(axis)
     {
         case 0:
         {
-            const int          reduce_elems = src.shape()[axis];
-            const unsigned int upper_dims   = src.shape().total_size_upper(1);
+            const unsigned int upper_dims = src.shape().total_size_upper(1);
             for(unsigned int du = 0; du < upper_dims; ++du)
             {
-                if(std::is_integral<T>::value)
-                {
-                    uint32_t res = 0;
-                    for(unsigned int x = 0; x < src_width; ++x)
-                    {
-                        res += static_cast<uint32_t>(src[du * src_width + x]);
-                    }
-                    if(mean && src_width > 0)
-                    {
-                        res /= src_width;
-                    }
-                    dst[du] = saturate_cast<uint8_t>(res);
-                }
-                else
-                {
-                    const T *src_row_ptr = src.data() + du * reduce_elems;
-
-                    auto res = reduce_operation(src_row_ptr, reduce_elems, op);
-                    if(mean && src_width > 0)
-                    {
-                        res /= src_width;
-                    }
-                    dst[du] = res;
-                }
+                const T *src_row_ptr = src.data() + du * reduce_elems;
+                auto     res         = reduce_operation(src_row_ptr, reduce_elems, op, 1);
+                dst[du]              = res;
             }
         }
         break;
@@ -125,32 +106,11 @@
             {
                 for(unsigned int x = 0; x < src_width; ++x)
                 {
-                    if(std::is_integral<T>::value)
-                    {
-                        uint32_t res = 0;
-                        for(unsigned int y = 0; y < src_height; ++y)
-                        {
-                            res += static_cast<uint32_t>(src[du * src_height * src_width + y * src_width + x]);
-                        }
-                        if(mean && src_height > 0)
-                        {
-                            res /= src_height;
-                        }
-                        dst[du * src_width + x] = saturate_cast<uint8_t>(res);
-                    }
-                    else
-                    {
-                        auto res = T(0);
-                        for(unsigned int y = 0; y < src_height; ++y)
-                        {
-                            res += src[du * src_height * src_width + y * src_width + x];
-                        }
-                        if(mean && src_height > 0)
-                        {
-                            res /= src_height;
-                        }
-                        dst[du * src_width + x] = res;
-                    }
+                    const int in_offset   = du * src_height * src_width + x;
+                    const int out_offset  = du * src_width + x;
+                    const T *src_row_ptr = src.data() + in_offset;
+                    auto      res         = reduce_operation(src_row_ptr, reduce_elems, op, src_width);
+                    dst[out_offset]       = res;
                 }
             }
         }
@@ -164,32 +124,11 @@
                 {
                     for(unsigned int y = 0; y < src_height; ++y)
                     {
-                        if(std::is_integral<T>::value)
-                        {
-                            uint32_t res = T(0);
-                            for(unsigned int z = 0; z < src_depth; ++z)
-                            {
-                                res += static_cast<uint32_t>(src[du * src_depth * src_height * src_width + z * src_height * src_width + y * src_width + x]);
-                            }
-                            if(mean && src_depth > 0)
-                            {
-                                res /= src_depth;
-                            }
-                            dst[du * src_width * src_height + y * src_width + x] = saturate_cast<uint8_t>(res);
-                        }
-                        else
-                        {
-                            auto res = T(0);
-                            for(unsigned int z = 0; z < src_depth; ++z)
-                            {
-                                res += src[du * src_depth * src_height * src_width + z * src_height * src_width + y * src_width + x];
-                            }
-                            if(mean && src_depth > 0)
-                            {
-                                res /= src_depth;
-                            }
-                            dst[du * src_width * src_height + y * src_width + x] = res;
-                        }
+                        const int in_offset   = du * src_depth * src_height * src_width + y * src_width + x;
+                        const int out_offset  = du * src_width * src_height + y * src_width + x;
+                        const T *src_row_ptr = src.data() + in_offset;
+                        auto      res         = reduce_operation(src_row_ptr, reduce_elems, op, src_height * src_width);
+                        dst[out_offset]       = res;
                     }
                 }
             }
@@ -206,34 +145,11 @@
                     {
                         for(unsigned int x = 0; x < src_width; ++x)
                         {
-                            if(std::is_integral<T>::value)
-                            {
-                                uint32_t res = 0;
-                                for(unsigned int w = 0; w < src_batch; ++w)
-                                {
-                                    res += static_cast<uint32_t>(src[du * src_batch * src_depth * src_height * src_width + w * src_width * src_height * src_depth + z * src_width * src_height + y * src_width + x]);
-                                }
-                                if(mean && src_batch > 0)
-                                {
-                                    res /= src_batch;
-                                }
-
-                                dst[du * src_depth * src_height * src_width + z * src_width * src_height + y * src_width + x] = saturate_cast<uint8_t>(res);
-                            }
-                            else
-                            {
-                                auto res = T(0);
-                                for(unsigned int w = 0; w < src_batch; ++w)
-                                {
-                                    res += src[du * src_batch * src_depth * src_height * src_width + w * src_width * src_height * src_depth + z * src_width * src_height + y * src_width + x];
-                                }
-                                if(mean && src_batch > 0)
-                                {
-                                    res /= src_batch;
-                                }
-
-                                dst[du * src_depth * src_height * src_width + z * src_width * src_height + y * src_width + x] = res;
-                            }
+                            const int in_offset   = du * src_batch * src_depth * src_height * src_width + z * src_width * src_height + y * src_width + x;
+                            const int out_offset  = du * src_depth * src_height * src_width + z * src_width * src_height + y * src_width + x;
+                            const T *src_row_ptr = src.data() + in_offset;
+                            auto      res         = reduce_operation(src_row_ptr, reduce_elems, op, src_width * src_height * src_depth);
+                            dst[out_offset]       = res;
                         }
                     }
                 }
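Taken together, the four call sites above differ only in the start offset and the stride handed to the rewritten helper: stride 1 for axis 0, W for axis 1, W*H for axis 2 and W*H*C for axis 3, with reduce_elems = shape[axis] in every case. A self-contained sketch of the same idea (illustrative only, not the library code), assuming a flat buffer indexed as x + W*(y + H*(z + C*w)):

    #include <cstddef>
    #include <vector>

    // Sum a 4D tensor along `axis` by walking the flat buffer with a fixed stride.
    // Dimensions are given innermost-first: dims = { W, H, C, N }.
    std::vector<float> sum_along_axis(const std::vector<float> &src,
                                      const std::size_t (&dims)[4], int axis)
    {
        std::size_t stride = 1;
        for(int d = 0; d < axis; ++d)
        {
            stride *= dims[d]; // 1, W, W*H or W*H*C
        }
        const std::size_t reduce_elems = dims[axis];

        std::vector<float> dst(src.size() / reduce_elems, 0.f);
        for(std::size_t out = 0; out < dst.size(); ++out)
        {
            // Split the output index into the coordinates below and above `axis`,
            // then rebuild the input offset of the first element of the reduced row.
            const std::size_t below = out % stride;
            const std::size_t above = out / stride;
            const std::size_t in    = above * stride * reduce_elems + below;
            for(std::size_t i = 0; i < reduce_elems; ++i)
            {
                dst[out] += src[in + i * stride];
            }
        }
        return dst;
    }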