COMPMID-1574 Implement ReduceMean in OpenCL

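A minimal usage sketch of the new CLReduceMean function, mirroring the
validation tests added in this patch (create_tensor is the test-framework
helper; the shape and axis are illustrative and CL runtime initialisation
is omitted):

    // Reduce over the Y axis of a 4D tensor, keeping the reduced dimension
    CLTensor src = create_tensor<CLTensor>(TensorShape(27U, 3U, 16U, 2U), DataType::F32);
    CLTensor dst;
    CLReduceMean reduce_mean;
    reduce_mean.configure(&src, Coordinates(1), true, &dst);
    src.allocator()->allocate();
    dst.allocator()->allocate();
    reduce_mean.run();
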
Change-Id: Id331199f569f52a37280a9ada5bf84694580b93c
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/152843
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
diff --git a/tests/validation/CL/ReduceMean.cpp b/tests/validation/CL/ReduceMean.cpp
new file mode 100644
index 0000000..07e859f
--- /dev/null
+++ b/tests/validation/CL/ReduceMean.cpp
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLReduceMean.h"
+
+#include "tests/CL/CLAccessor.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/datasets/SplitDataset.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/ReduceMeanFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+constexpr AbsoluteTolerance<float>   tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for 32-bit floating-point type */
+constexpr AbsoluteTolerance<float>   tolerance_f16(0.03f);  /**< Tolerance value for comparing reference's output against implementation's output for 16-bit floating-point type */
+constexpr AbsoluteTolerance<uint8_t> tolerance_qasymm8(1);  /**< Tolerance value for comparing reference's output against implementation's output for 8-bit asymmetric quantized type */
+
+const auto axis_keep = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1, 0), Coordinates(1, 2), Coordinates(0, 2), Coordinates(1, 3), Coordinates(0, 1, 2, 3) }),
+                               framework::dataset::make("KeepDims", { true }));
+const auto axis_drop = combine(framework::dataset::make("Axis", { Coordinates(0), Coordinates(1), Coordinates(3) }), framework::dataset::make("KeepDims", { false }));
+} // namespace
+TEST_SUITE(CL)
+TEST_SUITE(ReduceMean)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
+        framework::dataset::make("InputInfo", { TensorInfo(TensorShape(27U, 3U, 16U, 2U), 1, DataType::F32), // Invalid axis
+                                                TensorInfo(TensorShape(27U, 3U, 16U, 2U), 1, DataType::F32), // Invalid output shape
+                                                TensorInfo(TensorShape(32U, 16U, 16U, 2U), 1, DataType::F32)
+        }),
+        framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(27U, 3U, 1U, 2U), 1, DataType::F32),
+                                                 TensorInfo(TensorShape(27U, 3U, 1U, 2U), 1, DataType::F32),
+                                                 TensorInfo(TensorShape(32U, 16U, 1U, 2U), 1, DataType::F32)
+        })),
+        framework::dataset::make("Axis", { Coordinates(4), Coordinates(0,2), Coordinates(2) })),
+        framework::dataset::make("Expected", { false, false, true })),
+        input_info, output_info, axis, expected)
+{
+    const Status status = CLReduceMean::validate(&input_info.clone()->set_is_resizable(false), axis, true, &output_info.clone()->set_is_resizable(false));
+    ARM_COMPUTE_EXPECT(bool(status) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+DATA_TEST_CASE(Configuration,
+               framework::DatasetMode::ALL,
+               combine(datasets::SmallShapes(), framework::dataset::make("DataType", { DataType::F16, DataType::F32 })),
+               shape, data_type)
+{
+    // Create tensors
+    CLTensor ref_src = create_tensor<CLTensor>(shape, data_type);
+    CLTensor dst;
+
+    Coordinates axis(1);
+
+    // Create and configure function
+    CLReduceMean reduce_mean;
+    reduce_mean.configure(&ref_src, axis, true, &dst);
+
+    // Validate valid region
+    TensorShape output_shape = shape;
+    output_shape.set(1, 1);
+    const ValidRegion valid_region = shape_to_valid_region(output_shape);
+    validate(dst.info()->valid_region(), valid_region);
+}
+
+template <typename T>
+using CLReduceMeanFixture = ReduceMeanFixture<CLTensor, CLAccessor, CLReduceMean, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       CLReduceMeanFixture<half>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F16)), concat(axis_keep, axis_drop)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       CLReduceMeanFixture<half>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F16)), concat(axis_keep, axis_drop)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END() // FP16
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       CLReduceMeanFixture<float>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::F32)), concat(axis_keep, axis_drop)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       CLReduceMeanFixture<float>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::F32)), concat(axis_keep, axis_drop)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+
+template <typename T>
+using CLReduceMeanQuantizedFixture = ReduceMeanQuantizedFixture<CLTensor, CLAccessor, CLReduceMean, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+FIXTURE_DATA_TEST_CASE(RunSmall,
+                       CLReduceMeanQuantizedFixture<uint8_t>,
+                       framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(datasets::Small4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge,
+                       CLReduceMeanQuantizedFixture<uint8_t>,
+                       framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(datasets::Large4DShapes(), framework::dataset::make("DataType", DataType::QASYMM8)), concat(axis_keep, axis_drop)), framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255, 0) })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qasymm8);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
+TEST_SUITE_END() // ReduceMean
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/CL/ReductionOperation.cpp b/tests/validation/CL/ReductionOperation.cpp
index ca0988f..794db1a 100644
--- a/tests/validation/CL/ReductionOperation.cpp
+++ b/tests/validation/CL/ReductionOperation.cpp
@@ -45,7 +45,7 @@
 {
 /** Tolerance for float operations */
 RelativeTolerance<float> tolerance_f32(0.00001f);
-RelativeTolerance<float> tolerance_f16(0.1f);
+AbsoluteTolerance<float> tolerance_f16(0.1f);
 } // namespace
 
 TEST_SUITE(CL)
@@ -58,7 +58,7 @@
                                                      TensorInfo(TensorShape(128U, 64U), 2, DataType::F32), // Number of Input channels != 1
                                                      TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != F16/F32
                                                      TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis >= num_max_dimensions
-                                                     TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis > 0
+                                                     TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis > 0 and SUM_SQUARE
                                                      TensorInfo(TensorShape(128U, 64U), 1, DataType::F32)
                                                    }),
     framework::dataset::make("OutputInfo",         { TensorInfo(TensorShape(1U, 64U), 1, DataType::F16),
@@ -87,13 +87,13 @@
 TEST_SUITE(Float)
 TEST_SUITE(FP16)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLReductionOperationFixture<half>, framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0 })), datasets::ReductionOperations()))
+                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), datasets::ReductionOperations()))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f16);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, CLReductionOperationFixture<half>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0 })), datasets::ReductionOperations()))
+                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F16)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), datasets::ReductionOperations()))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f16);
@@ -101,13 +101,13 @@
 TEST_SUITE_END() // F16
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLReductionOperationFixture<float>, framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), datasets::ReductionOperations()))
+                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), datasets::ReductionOperations()))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, CLReductionOperationFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), datasets::ReductionOperations()))
+                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0, 1, 2, 3 })), datasets::ReductionOperations()))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
diff --git a/tests/validation/NEON/ReductionOperation.cpp b/tests/validation/NEON/ReductionOperation.cpp
index c2f2909..b0480b0 100644
--- a/tests/validation/NEON/ReductionOperation.cpp
+++ b/tests/validation/NEON/ReductionOperation.cpp
@@ -85,13 +85,13 @@
 
 TEST_SUITE(FP32)
 FIXTURE_DATA_TEST_CASE(RunSmall, NEReductionOperationFixture<float>, framework::DatasetMode::PRECOMMIT,
-                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), datasets::ReductionOperations()))
+                       combine(combine(combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), framework::dataset::make("Op", { ReductionOperation::SUM_SQUARE })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
 }
 FIXTURE_DATA_TEST_CASE(RunLarge, NEReductionOperationFixture<float>, framework::DatasetMode::NIGHTLY,
-                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), datasets::ReductionOperations()))
+                       combine(combine(combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)), framework::dataset::make("Axis", { 0 })), framework::dataset::make("Op", { ReductionOperation::SUM_SQUARE })))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_f32);
diff --git a/tests/validation/fixtures/ReduceMeanFixture.h b/tests/validation/fixtures/ReduceMeanFixture.h
new file mode 100644
index 0000000..6debd4a
--- /dev/null
+++ b/tests/validation/fixtures/ReduceMeanFixture.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_REDUCE_MEAN_FIXTURE
+#define ARM_COMPUTE_TEST_REDUCE_MEAN_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/ReductionOperation.h"
+#include "tests/validation/reference/ReshapeLayer.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ReduceMeanValidationFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info)
+    {
+        _target    = compute_target(shape, data_type, axis, keep_dims, quantization_info);
+        _reference = compute_reference(shape, data_type, axis, keep_dims, quantization_info);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor)
+    {
+        if(!is_data_type_quantized(tensor.data_type()))
+        {
+            std::uniform_real_distribution<> distribution(-1.0f, 1.0f);
+            library->fill(tensor, distribution, 0);
+        }
+        else
+        {
+            const QuantizationInfo          quant_info = tensor.quantization_info();
+            const int                       min_bound  = quant_info.quantize(-1.f, RoundingPolicy::TO_NEAREST_UP);
+            const int                       max_bound  = quant_info.quantize(1.f, RoundingPolicy::TO_NEAREST_UP);
+            std::uniform_int_distribution<> distribution(min_bound, max_bound);
+
+            library->fill(tensor, distribution, 0);
+        }
+    }
+
+    TensorType compute_target(TensorShape &src_shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info)
+    {
+        // Create tensors
+        TensorType src = create_tensor<TensorType>(src_shape, data_type, 1, quantization_info);
+        TensorType dst;
+
+        // Create and configure function
+        FunctionType reduction_mean;
+        reduction_mean.configure(&src, axis, keep_dims, &dst);
+
+        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        src.allocator()->allocate();
+        dst.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(src));
+
+        // Compute function
+        reduction_mean.run();
+
+        return dst;
+    }
+
+    SimpleTensor<T> compute_reference(TensorShape &src_shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info)
+    {
+        // Create reference
+        SimpleTensor<T> src{ src_shape, data_type, 1, quantization_info };
+
+        // Fill reference
+        fill(src);
+
+        SimpleTensor<T> out;
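+        // Reduce one axis at a time: the output of each pass becomes the input of the next one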
+        for(unsigned int i = 0; i < axis.num_dimensions(); ++i)
+        {
+            TensorShape output_shape = i == 0 ? src_shape : out.shape();
+            output_shape.set(axis[i], 1);
+            out = reference::reduction_operation<T>(i == 0 ? src : out, output_shape, axis[i], ReductionOperation::MEAN_SUM);
+        }
+
+        if(!keep_dims)
+        {
+            TensorShape output_shape = src_shape;
+            for(unsigned int i = 0; i < axis.num_dimensions(); ++i)
+            {
+                output_shape.remove_dimension(axis[i]);
+            }
+
+            out = reference::reshape_layer(out, output_shape);
+        }
+        return out;
+    }
+
+    TensorType      _target{};
+    SimpleTensor<T> _reference{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ReduceMeanQuantizedFixture : public ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims, QuantizationInfo quantization_info = QuantizationInfo())
+    {
+        ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, keep_dims, quantization_info);
+    }
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class ReduceMeanFixture : public ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, DataType data_type, Coordinates axis, bool keep_dims)
+    {
+        ReduceMeanValidationFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, data_type, axis, keep_dims, QuantizationInfo());
+    }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_REDUCE_MEAN_FIXTURE */
diff --git a/tests/validation/reference/ReductionOperation.cpp b/tests/validation/reference/ReductionOperation.cpp
index 871a761..11947bd 100644
--- a/tests/validation/reference/ReductionOperation.cpp
+++ b/tests/validation/reference/ReductionOperation.cpp
@@ -48,12 +48,24 @@
 };
 
 template <typename T>
+struct sum
+{
+    T operator()(const T &lhs, const T &rhs) const
+    {
+        return (lhs + rhs);
+    }
+};
+
+template <typename T>
 T reduce_operation(T *ptr, int reduce_elements, ReductionOperation op)
 {
     switch(op)
     {
         case ReductionOperation::SUM_SQUARE:
             return std::accumulate(ptr, ptr + reduce_elements, static_cast<T>(0), square<T>());
+        case ReductionOperation::SUM:
+        case ReductionOperation::MEAN_SUM:
+            return std::accumulate(ptr, ptr + reduce_elements, static_cast<T>(0), sum<T>());
         default:
             ARM_COMPUTE_ERROR("Unsupported reduction operation");
     }
@@ -64,23 +76,172 @@
 SimpleTensor<T> reduction_operation(const SimpleTensor<T> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op)
 {
     // Create reference
-    SimpleTensor<T> dst{ dst_shape, src.data_type() };
+    SimpleTensor<T>    dst{ dst_shape, src.data_type() };
+    const unsigned int src_width  = src.shape().x();
+    const unsigned int src_height = src.shape().y();
+    const unsigned int src_depth  = src.shape().z();
+    const unsigned int src_batch  = src.shape()[3];
+    const bool         mean       = op == ReductionOperation::MEAN_SUM;
 
-    // Compute reference
-    const int reduce_elems = src.shape()[axis];
-    const int upper_dims   = src.shape().total_size_upper(axis + 1);
-
-    for(int du = 0; du < upper_dims; ++du)
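+    // Reduce along the requested axis; integral (quantized) inputs are accumulated in uint32_t
+    // and MEAN_SUM divides the accumulated sum by the number of reduced elements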
+    switch(axis)
     {
-        if(axis == 0)
+        case 0:
         {
-            const T *src_row_ptr = src.data() + du * reduce_elems;
-            dst[du]              = reduce_operation(src_row_ptr, reduce_elems, op);
+            const int          reduce_elems = src.shape()[axis];
+            const unsigned int upper_dims   = src.shape().total_size_upper(1);
+            for(unsigned int du = 0; du < upper_dims; ++du)
+            {
+                if(std::is_integral<T>::value)
+                {
+                    uint32_t res = 0;
+                    for(unsigned int x = 0; x < src_width; ++x)
+                    {
+                        res += static_cast<uint32_t>(src[du * src_width + x]);
+                    }
+                    if(mean && src_width > 0)
+                    {
+                        res /= src_width;
+                    }
+                    dst[du] = static_cast<uint8_t>(res);
+                }
+                else
+                {
+                    const T *src_row_ptr = src.data() + du * reduce_elems;
+
+                    auto res = reduce_operation(src_row_ptr, reduce_elems, op);
+                    if(mean && src_width > 0)
+                    {
+                        res /= src_width;
+                    }
+                    dst[du] = res;
+                }
+            }
         }
-        else
+        break;
+        case 1:
         {
+            const unsigned int upper_dims = src.shape().total_size_upper(2);
+            for(unsigned int du = 0; du < upper_dims; ++du)
+            {
+                for(unsigned int x = 0; x < src_width; ++x)
+                {
+                    if(std::is_integral<T>::value)
+                    {
+                        uint32_t res = 0;
+                        for(unsigned int y = 0; y < src_height; ++y)
+                        {
+                            res += static_cast<uint32_t>(src[du * src_height * src_width + y * src_width + x]);
+                        }
+                        if(mean && src_height > 0)
+                        {
+                            res /= src_height;
+                        }
+                        dst[du * src_width + x] = static_cast<uint8_t>(res);
+                    }
+                    else
+                    {
+                        auto res = T(0);
+                        for(unsigned int y = 0; y < src_height; ++y)
+                        {
+                            res += src[du * src_height * src_width + y * src_width + x];
+                        }
+                        if(mean && src_height > 0)
+                        {
+                            res /= src_height;
+                        }
+                        dst[du * src_width + x] = res;
+                    }
+                }
+            }
+        }
+        break;
+        case 2:
+        {
+            const unsigned int upper_dims = src.shape().total_size_upper(3);
+            for(unsigned int du = 0; du < upper_dims; ++du)
+            {
+                for(unsigned int x = 0; x < src_width; ++x)
+                {
+                    for(unsigned int y = 0; y < src_height; ++y)
+                    {
+                        if(std::is_integral<T>::value)
+                        {
+                            uint32_t res = 0;
+                            for(unsigned int z = 0; z < src_depth; ++z)
+                            {
+                                res += static_cast<uint32_t>(src[du * src_depth * src_height * src_width + z * src_height * src_width + y * src_width + x]);
+                            }
+                            if(mean && src_depth > 0)
+                            {
+                                res /= src_depth;
+                            }
+                            dst[du * src_width * src_height + y * src_width + x] = static_cast<uint8_t>(res);
+                        }
+                        else
+                        {
+                            auto res = T(0);
+                            for(unsigned int z = 0; z < src_depth; ++z)
+                            {
+                                res += src[du * src_depth * src_height * src_width + z * src_height * src_width + y * src_width + x];
+                            }
+                            if(mean && src_depth > 0)
+                            {
+                                res /= src_depth;
+                            }
+                            dst[du * src_width * src_height + y * src_width + x] = res;
+                        }
+                    }
+                }
+            }
+        }
+        break;
+        case 3:
+        {
+            const unsigned int upper_dims = src.shape().total_size_upper(4);
+            for(unsigned int du = 0; du < upper_dims; ++du)
+            {
+                for(unsigned int z = 0; z < src_depth; ++z)
+                {
+                    for(unsigned int y = 0; y < src_height; ++y)
+                    {
+                        for(unsigned int x = 0; x < src_width; ++x)
+                        {
+                            if(std::is_integral<T>::value)
+                            {
+                                uint32_t res = 0;
+                                for(unsigned int w = 0; w < src_batch; ++w)
+                                {
+                                    res += static_cast<uint32_t>(src[du * src_batch * src_depth * src_height * src_width + w * src_width * src_height * src_depth + z * src_width * src_height + y * src_width + x]);
+                                }
+                                if(mean && src_batch > 0)
+                                {
+                                    res /= src_batch;
+                                }
+
+                                dst[du * src_depth * src_height * src_width + z * src_width * src_height + y * src_width + x] = static_cast<uint8_t>(res);
+                            }
+                            else
+                            {
+                                auto res = T(0);
+                                for(unsigned int w = 0; w < src_batch; ++w)
+                                {
+                                    res += src[du * src_batch * src_depth * src_height * src_width + w * src_width * src_height * src_depth + z * src_width * src_height + y * src_width + x];
+                                }
+                                if(mean && src_batch > 0)
+                                {
+                                    res /= src_batch;
+                                }
+
+                                dst[du * src_depth * src_height * src_width + z * src_width * src_height + y * src_width + x] = res;
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        break;
+        default:
             ARM_COMPUTE_ERROR("Unsupported reduction axis");
-        }
     }
 
     return dst;
@@ -88,6 +249,7 @@
 
 template SimpleTensor<float> reduction_operation(const SimpleTensor<float> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op);
 template SimpleTensor<half> reduction_operation(const SimpleTensor<half> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op);
+template SimpleTensor<uint8_t> reduction_operation(const SimpleTensor<uint8_t> &src, const TensorShape &dst_shape, unsigned int axis, ReductionOperation op);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/ReductionOperation.h b/tests/validation/reference/ReductionOperation.h
index 6da6436..859b57a 100644
--- a/tests/validation/reference/ReductionOperation.h
+++ b/tests/validation/reference/ReductionOperation.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *