COMPMID-417: Port PoolingLayer to new validation.

Change-Id: I7f2f5f5f81ad9932661fc4c660bf90614288bc96
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/85270
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/tests/TypePrinter.h b/tests/TypePrinter.h
index 020b559..49e717a 100644
--- a/tests/TypePrinter.h
+++ b/tests/TypePrinter.h
@@ -86,6 +86,13 @@
     return os;
 }
 
+inline std::string to_string(const PadStrideInfo &pad_stride_info)
+{
+    std::stringstream str;
+    str << pad_stride_info;
+    return str.str();
+}
+
 /** Formatted output of the ROIPoolingInfo type. */
 inline ::std::ostream &operator<<(::std::ostream &os, const ROIPoolingLayerInfo &pool_info)
 {
@@ -329,6 +336,13 @@
     return os;
 }
 
+inline std::string to_string(const PoolingType &type)
+{
+    std::stringstream str;
+    str << type;
+    return str.str();
+}
+
 /** Formatted output of @ref PoolingLayerInfo. */
 inline ::std::ostream &operator<<(::std::ostream &os, const PoolingLayerInfo &info)
 {
@@ -337,6 +351,13 @@
     return os;
 }
 
+inline std::string to_string(const PoolingLayerInfo &info)
+{
+    std::stringstream str;
+    str << info.pool_type();
+    return str.str();
+}
+
 /** Formatted output of the RoundingPolicy type. */
 inline ::std::ostream &operator<<(::std::ostream &os, const RoundingPolicy &rounding_policy)
 {
diff --git a/tests/dataset/PoolingLayerDataset.h b/tests/dataset/PoolingLayerDataset.h
index 1496cad..ee3e6dc 100644
--- a/tests/dataset/PoolingLayerDataset.h
+++ b/tests/dataset/PoolingLayerDataset.h
@@ -133,29 +133,6 @@
 
     ~GoogLeNetPoolingLayerDataset() = default;
 };
-
-class RandomPoolingLayerDataset final : public PoolingLayerDataset<10>
-{
-public:
-    RandomPoolingLayerDataset()
-        : GenericDataset
-    {
-        PoolingLayerDataObject{ TensorShape(27U, 27U, 16U), TensorShape(13U, 13U, 16U), PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)) },
-        PoolingLayerDataObject{ TensorShape(13U, 13U, 32U), TensorShape(6U, 6U, 32U), PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0)) },
-        PoolingLayerDataObject{ TensorShape(24U, 24U, 10U), TensorShape(12U, 12U, 10U), PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)) },
-        PoolingLayerDataObject{ TensorShape(8U, 8U, 30U), TensorShape(4U, 4U, 30U), PoolingLayerInfo(PoolingType::MAX, 2, PadStrideInfo(2, 2, 0, 0)) },
-        PoolingLayerDataObject{ TensorShape(27U, 27U, 16U), TensorShape(13U, 13U, 16U), PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(2, 2, 0, 0)) },
-        PoolingLayerDataObject{ TensorShape(13U, 13U, 32U), TensorShape(6U, 6U, 32U), PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(2, 2, 0, 0)) },
-        PoolingLayerDataObject{ TensorShape(24U, 24U, 10U), TensorShape(12U, 12U, 10U), PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(2, 2, 0, 0)) },
-        PoolingLayerDataObject{ TensorShape(8U, 8U, 30U), TensorShape(4U, 4U, 30U), PoolingLayerInfo(PoolingType::AVG, 2, PadStrideInfo(2, 2, 0, 0)) },
-        PoolingLayerDataObject{ TensorShape(7U, 7U, 10U), TensorShape(7U, 7U, 10U), PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1)) },
-        PoolingLayerDataObject{ TensorShape(7U, 7U, 10U), TensorShape(7U, 7U, 10U), PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(1, 1, 1, 1)) },
-    }
-    {
-    }
-
-    ~RandomPoolingLayerDataset() = default;
-};
 } // namespace test
 } // namespace arm_compute
 #endif //__ARM_COMPUTE_TEST_DATASET_POOLING_LAYER_DATASET_H__
diff --git a/tests/datasets_new/PoolingTypesDataset.h b/tests/datasets_new/PoolingTypesDataset.h
new file mode 100644
index 0000000..4e4fa26
--- /dev/null
+++ b/tests/datasets_new/PoolingTypesDataset.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_POOLING_TYPES_DATASET_H__
+#define __ARM_COMPUTE_TEST_POOLING_TYPES_DATASET_H__
+
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class PoolingTypes final : public framework::dataset::ContainerDataset<std::vector<PoolingType>>
+{
+public:
+    PoolingTypes()
+        : ContainerDataset("PoolType",
+    {
+        PoolingType::MAX, PoolingType::AVG
+    })
+    {
+    }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_POOLING_TYPES_DATASET_H__ */
diff --git a/tests/datasets_new/ShapeDatasets.h b/tests/datasets_new/ShapeDatasets.h
index 72681d7..50a7010 100644
--- a/tests/datasets_new/ShapeDatasets.h
+++ b/tests/datasets_new/ShapeDatasets.h
@@ -77,7 +77,6 @@
                      // Batch size 4
                      TensorShape{ 7U, 7U, 4U },
                      TensorShape{ 27U, 13U, 2U, 4U },
-                     TensorShape{ 128U, 64U, 1U, 3U, 4U },
                      // Arbitrary batch size
                      TensorShape{ 7U, 7U, 5U }
     })
diff --git a/tests/datasets_new/system_tests/alexnet/AlexNetActivationLayerDataset.h b/tests/datasets_new/system_tests/alexnet/AlexNetActivationLayerDataset.h
index 27e9956..7062c2e 100644
--- a/tests/datasets_new/system_tests/alexnet/AlexNetActivationLayerDataset.h
+++ b/tests/datasets_new/system_tests/alexnet/AlexNetActivationLayerDataset.h
@@ -44,10 +44,7 @@
     AlexNetActivationLayerDataset()
         : CartesianProductDataset
     {
-        framework::dataset::make("Shape", {
-            TensorShape(55U, 55U, 96U), TensorShape(27U, 27U, 256U),
-            TensorShape(13U, 13U, 384U), TensorShape(13U, 13U, 256U),
-            TensorShape(4096U) }),
+        framework::dataset::make("Shape", { TensorShape(55U, 55U, 96U), TensorShape(27U, 27U, 256U), TensorShape(13U, 13U, 384U), TensorShape(13U, 13U, 256U), TensorShape(4096U) }),
         framework::dataset::make("Info", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
     }
     {
diff --git a/tests/validation/CL/PoolingLayer.cpp b/tests/validation/CL/PoolingLayer.cpp
deleted file mode 100644
index 286b1d9..0000000
--- a/tests/validation/CL/PoolingLayer.cpp
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "CL/CLAccessor.h"
-#include "TypePrinter.h"
-#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
-#include "tests/Globals.h"
-#include "tests/Utils.h"
-#include "tests/dataset/PoolingLayerDataset.h"
-#include "validation/Datasets.h"
-#include "validation/Reference.h"
-#include "validation/Validation.h"
-
-#include <random>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-using namespace arm_compute::test::validation;
-
-namespace
-{
-const float tolerance_qs8  = 3;     /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
-const float tolerance_qs16 = 6;     /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
-const float tolerance_f    = 1e-05; /**< Tolerance value for comparing reference's output against implementation's output for float input */
-
-/** Compute CL pooling layer function.
- *
- * @param[in] shape                Shape of the input and output tensors.
- * @param[in] dt                   Data type of input and output tensors.
- * @param[in] pool_info            Pooling Layer information.
- * @param[in] fixed_point_position The fixed point position.
- *
- * @return Computed output tensor.
- */
-CLTensor compute_pooling_layer(const TensorShape &shape_in, const TensorShape &shape_out, DataType dt, PoolingLayerInfo pool_info, int fixed_point_position = 0)
-{
-    // Create tensors
-    CLTensor src = create_tensor<CLTensor>(shape_in, dt, 1, fixed_point_position);
-    CLTensor dst = create_tensor<CLTensor>(shape_out, dt, 1, fixed_point_position);
-
-    // Create and configure function
-    CLPoolingLayer pool;
-    pool.configure(&src, &dst, pool_info);
-
-    // Allocate tensors
-    src.allocator()->allocate();
-    dst.allocator()->allocate();
-
-    BOOST_TEST(!src.info()->is_resizable());
-    BOOST_TEST(!dst.info()->is_resizable());
-
-    // Fill tensors
-    // Fill tensors
-    int min = 0;
-    int max = 0;
-    switch(dt)
-    {
-        case DataType::F32:
-            min = -1;
-            max = 1;
-            break;
-        case DataType::QS8:
-        case DataType::QS16:
-            min = -(1 << fixed_point_position);
-            max = (1 << fixed_point_position);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("DataType not supported.");
-    }
-    std::uniform_real_distribution<> distribution(min, max);
-    library->fill(CLAccessor(src), distribution, 0);
-
-    // Compute function
-    pool.run();
-
-    return dst;
-}
-
-TensorShape get_output_shape(TensorShape in_shape, const PoolingLayerInfo &pool_info)
-{
-    TensorShape out_shape(in_shape);
-    const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(in_shape.x(),
-                                                                                             in_shape.y(),
-                                                                                             pool_info.pool_size(),
-                                                                                             pool_info.pool_size(),
-                                                                                             pool_info.pad_stride_info());
-    out_shape.set(0, scaled_dims.first);
-    out_shape.set(1, scaled_dims.second);
-    return out_shape;
-}
-} // namespace
-
-#ifndef DOXYGEN_SKIP_THIS
-BOOST_AUTO_TEST_SUITE(CL)
-BOOST_AUTO_TEST_SUITE(PoolingLayer)
-
-BOOST_AUTO_TEST_SUITE(Float)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RunSmall, SmallShapes() * CNNFloatDataTypes() * PoolingTypes() * boost::unit_test::data::make({ 2, 3, 7 }) * boost::unit_test::data::make({ 1, 2 }) * boost::unit_test::data::make({ 0, 1 }),
-                     src_shape, dt, pool_type, pool_size, pool_stride, pool_pad)
-{
-    PoolingLayerInfo pool_info(pool_type, pool_size, PadStrideInfo(pool_stride, pool_stride, pool_pad, pool_pad, DimensionRoundingType::CEIL));
-    TensorShape      dst_shape = get_output_shape(src_shape, pool_info);
-
-    // Compute function
-    CLTensor dst = compute_pooling_layer(src_shape, dst_shape, dt, pool_info);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_pooling_layer(src_shape, dst_shape, dt, pool_info);
-
-    // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance_f);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(Quantized)
-
-BOOST_AUTO_TEST_SUITE(QS8)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RandomDataset,
-                     RandomPoolingLayerDataset() * boost::unit_test::data::xrange(1, 5),
-                     obj, fixed_point_position)
-{
-    // Compute function
-    CLTensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, DataType::QS8, obj.info, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, DataType::QS8, obj.info, fixed_point_position);
-
-    // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance_qs8, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(QS16)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RandomDataset,
-                     RandomPoolingLayerDataset() * boost::unit_test::data::xrange(1, 12),
-                     obj, fixed_point_position)
-{
-    // Compute function
-    CLTensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, DataType::QS16, obj.info, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, DataType::QS16, obj.info, fixed_point_position);
-
-    // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance_qs16, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* DOXYGEN_SKIP_THIS */
diff --git a/tests/validation/Datasets.h b/tests/validation/Datasets.h
index 64918fc..15e1b09 100644
--- a/tests/validation/Datasets.h
+++ b/tests/validation/Datasets.h
@@ -37,7 +37,6 @@
 #include "dataset/MatrixPatternDataset.h"
 #include "dataset/NonLinearFilterFunctionDataset.h"
 #include "dataset/NormalizationTypeDataset.h"
-#include "dataset/PoolingLayerDataset.h"
 #include "dataset/PoolingTypesDataset.h"
 #include "dataset/RoundingPolicyDataset.h"
 #include "dataset/ShapeDatasets.h"
@@ -177,12 +176,6 @@
 
 /// Register the data set with Boost
 template <>
-struct is_dataset<arm_compute::test::RandomPoolingLayerDataset> : boost::mpl::true_
-{
-};
-
-/// Register the data set with Boost
-template <>
 struct is_dataset<arm_compute::test::RoundingPolicies> : boost::mpl::true_
 {
 };
diff --git a/tests/validation/NEON/PoolingLayer.cpp b/tests/validation/NEON/PoolingLayer.cpp
deleted file mode 100644
index 8b4ff18..0000000
--- a/tests/validation/NEON/PoolingLayer.cpp
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "NEON/Accessor.h"
-#include "TypePrinter.h"
-#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
-#include "tests/Globals.h"
-#include "tests/Utils.h"
-#include "tests/dataset/PoolingLayerDataset.h"
-#include "validation/Datasets.h"
-#include "validation/Reference.h"
-#include "validation/Validation.h"
-
-#include <random>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-using namespace arm_compute::test::validation;
-
-namespace
-{
-const float tolerance_q   = 0;     /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
-const float tolerance_f32 = 1e-05; /**< Tolerance value for comparing reference's output against implementation's output for float input */
-#ifdef ARM_COMPUTE_ENABLE_FP16
-const float tolerance_f16 = 0.001f; /**< Tolerance value for comparing reference's output against half precision floating point implementation's output */
-#endif                              /* ARM_COMPUTE_ENABLE_FP16 */
-
-/** Compute Neon pooling layer function.
- *
- * @param[in] shape     Shape of the input and output tensors.
- * @param[in] dt        Data type of input and output tensors.
- * @param[in] pool_info Pooling Layer information.
- *
- * @return Computed output tensor.
- */
-Tensor compute_pooling_layer(const TensorShape &shape_in, const TensorShape &shape_out, DataType dt, PoolingLayerInfo pool_info, int fixed_point_position = 0)
-{
-    // Create tensors
-    Tensor src = create_tensor<Tensor>(shape_in, dt, 1, fixed_point_position);
-    Tensor dst = create_tensor<Tensor>(shape_out, dt, 1, fixed_point_position);
-
-    // Create and configure function
-    NEPoolingLayer pool;
-    pool.configure(&src, &dst, pool_info);
-
-    // Allocate tensors
-    src.allocator()->allocate();
-    dst.allocator()->allocate();
-
-    BOOST_TEST(!src.info()->is_resizable());
-    BOOST_TEST(!dst.info()->is_resizable());
-
-    // Fill tensors
-    int min = 0;
-    int max = 0;
-    switch(dt)
-    {
-        case DataType::F32:
-        case DataType::F16:
-            min = -1;
-            max = 1;
-            break;
-        case DataType::QS8:
-        case DataType::QS16:
-            min = -(1 << fixed_point_position);
-            max = (1 << fixed_point_position);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("DataType not supported.");
-    }
-    std::uniform_real_distribution<> distribution(min, max);
-    library->fill(Accessor(src), distribution, 0);
-
-    // Compute function
-    pool.run();
-
-    return dst;
-}
-
-TensorShape get_output_shape(TensorShape in_shape, const PoolingLayerInfo &pool_info)
-{
-    TensorShape out_shape(in_shape);
-    const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(in_shape.x(),
-                                                                                             in_shape.y(),
-                                                                                             pool_info.pool_size(),
-                                                                                             pool_info.pool_size(),
-                                                                                             pool_info.pad_stride_info());
-    out_shape.set(0, scaled_dims.first);
-    out_shape.set(1, scaled_dims.second);
-    return out_shape;
-}
-} // namespace
-
-#ifndef DOXYGEN_SKIP_THIS
-BOOST_AUTO_TEST_SUITE(NEON)
-BOOST_AUTO_TEST_SUITE(PoolingLayer)
-
-BOOST_AUTO_TEST_SUITE(Float)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RandomDataset,
-                     RandomPoolingLayerDataset() * boost::unit_test::data::make(DataType::F32),
-                     obj, dt)
-{
-    // Compute function
-    Tensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_f32, 0);
-}
-
-BOOST_DATA_TEST_CASE(RunSmall7x7,
-                     SmallShapes() * CNNFloatDataTypes() * PoolingTypes() * boost::unit_test::data::make({ 2, 3, 7 }) * boost::unit_test::data::make({ 1, 2 }) * boost::unit_test::data::make({ 0, 1 }),
-                     src_shape, dt, pool_type, pool_size, pool_stride, pool_pad)
-{
-    PoolingLayerInfo pool_info(pool_type, pool_size, PadStrideInfo(pool_stride, pool_stride, pool_pad, pool_pad, DimensionRoundingType::CEIL));
-    TensorShape      dst_shape = get_output_shape(src_shape, pool_info);
-
-    // Compute function
-    Tensor dst = compute_pooling_layer(src_shape, dst_shape, dt, pool_info);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_pooling_layer(src_shape, dst_shape, dt, pool_info);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_f32, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-#ifdef ARM_COMPUTE_ENABLE_FP16
-BOOST_AUTO_TEST_SUITE(Float16)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RandomDataset,
-                     RandomPoolingLayerDataset() * boost::unit_test::data::make(DataType::F16),
-                     obj, dt)
-{
-    // Compute function
-    Tensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_f16, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* ARM_COMPUTE_ENABLE_FP16 */
-
-BOOST_AUTO_TEST_SUITE(Quantized)
-BOOST_AUTO_TEST_SUITE(QS8)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RandomDataset,
-                     RandomPoolingLayerDataset() * boost::unit_test::data::make(DataType::QS8) * boost::unit_test::data::xrange(1, 5),
-                     obj, dt, fixed_point_position)
-{
-    // Compute function
-    Tensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info, fixed_point_position);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_q, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(QS16)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(RandomDataset,
-                     RandomPoolingLayerDataset() * boost::unit_test::data::make(DataType::QS16) * boost::unit_test::data::xrange(1, 13),
-                     obj, dt, fixed_point_position)
-{
-    // Compute function
-    Tensor dst = compute_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_pooling_layer(obj.src_shape, obj.dst_shape, dt, obj.info, fixed_point_position);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_q, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* DOXYGEN_SKIP_THIS */
diff --git a/tests/validation/Reference.cpp b/tests/validation/Reference.cpp
index 1ea017e..6da9211 100644
--- a/tests/validation/Reference.cpp
+++ b/tests/validation/Reference.cpp
@@ -461,39 +461,6 @@
     return ref_dst;
 }
 
-RawTensor Reference::compute_reference_pooling_layer(const TensorShape &shape_in, const TensorShape &shape_out, DataType dt, PoolingLayerInfo pool_info, int fixed_point_position)
-{
-    // Create reference
-    RawTensor ref_src(shape_in, dt, 1, fixed_point_position);
-    RawTensor ref_dst(shape_out, dt, 1, fixed_point_position);
-
-    // Fill reference
-    int min = 0;
-    int max = 0;
-    switch(dt)
-    {
-        case DataType::F32:
-        case DataType::F16:
-            min = -1;
-            max = 1;
-            break;
-        case DataType::QS8:
-        case DataType::QS16:
-            min = -(1 << fixed_point_position);
-            max = (1 << fixed_point_position);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("DataType not supported.");
-    }
-    std::uniform_real_distribution<> distribution(min, max);
-    library->fill(ref_src, distribution, 0.0);
-
-    // Compute reference
-    ReferenceCPP::pooling_layer(ref_src, ref_dst, pool_info);
-
-    return ref_dst;
-}
-
 RawTensor Reference::compute_reference_roi_pooling_layer(const TensorShape &shape, DataType dt, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info)
 {
     TensorShape shape_dst;
diff --git a/tests/validation/Reference.h b/tests/validation/Reference.h
index 288dc0e..430c423 100644
--- a/tests/validation/Reference.h
+++ b/tests/validation/Reference.h
@@ -293,17 +293,6 @@
      * @return Computed raw tensor.
      */
     static RawTensor compute_reference_batch_normalization_layer(const TensorShape &shape0, const TensorShape &shape1, DataType dt, float epsilon, int fixed_point_position = 0);
-    /** Compute reference pooling layer.
-      *
-      * @param[in] shape_in             Shape of the input tensor.
-      * @param[in] shape_out            Shape of the output tensor.
-      * @param[in] dt                   Data type of input and output tensors.
-      * @param[in] pool_info            Pooling Layer information.
-      * @param[in] fixed_point_position (Optional) Number of bits for the fractional part of the fixed point numbers.
-      *
-      * @return Computed raw tensor.
-      */
-    static RawTensor compute_reference_pooling_layer(const TensorShape &shape_in, const TensorShape &shape_out, DataType dt, PoolingLayerInfo pool_info, int fixed_point_position = 0);
     /** Compute reference roi pooling layer.
      *
      * @param[in] shape     Shape of the input tensor.
diff --git a/tests/validation/ReferenceCPP.cpp b/tests/validation/ReferenceCPP.cpp
index 58b47f9..4c831eb 100644
--- a/tests/validation/ReferenceCPP.cpp
+++ b/tests/validation/ReferenceCPP.cpp
@@ -281,14 +281,6 @@
     boost::apply_visitor(tensor_visitors::batch_normalization_layer_visitor(s, m, v, b, g, epsilon, fixed_point_position), d);
 }
 
-// Pooling Layer
-void ReferenceCPP::pooling_layer(const RawTensor &src, RawTensor &dst, PoolingLayerInfo pool_info)
-{
-    const TensorVariant s = TensorFactory::get_tensor(src);
-    TensorVariant       d = TensorFactory::get_tensor(dst);
-    boost::apply_visitor(tensor_visitors::pooling_layer_visitor(s, pool_info), d);
-}
-
 // ROI Pooling Layer
 void ReferenceCPP::roi_pooling_layer(const RawTensor &src, RawTensor &dst, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info)
 {
diff --git a/tests/validation/ReferenceCPP.h b/tests/validation/ReferenceCPP.h
index 29612d1..96aade9 100644
--- a/tests/validation/ReferenceCPP.h
+++ b/tests/validation/ReferenceCPP.h
@@ -259,13 +259,6 @@
      */
     static void batch_normalization_layer(const RawTensor &src, RawTensor &dst, const RawTensor &mean, const RawTensor &var, const RawTensor &beta, const RawTensor &gamma, float epsilon,
                                           int fixed_point_position = 0);
-    /** Pooling layer of @p src based on the information from @p pool_info.
-     *
-     * @param[in]  src       Input tensor.
-     * @param[out] dst       Result tensor.
-     * @param[in]  pool_info Pooling Layer information.
-     */
-    static void pooling_layer(const RawTensor &src, RawTensor &dst, PoolingLayerInfo pool_info);
     /** ROI Pooling layer of @p src based on the information from @p pool_info and @p rois.
      *
      * @param[in]  src       Input tensor.
diff --git a/tests/validation/TensorOperations.h b/tests/validation/TensorOperations.h
index f5be139..e68a344 100644
--- a/tests/validation/TensorOperations.h
+++ b/tests/validation/TensorOperations.h
@@ -1071,229 +1071,6 @@
     }
 }
 
-// Pooling layer
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
-void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_info)
-{
-    const int   pool_size     = pool_info.pool_size();
-    PoolingType type          = pool_info.pool_type();
-    int         pool_stride_x = 0;
-    int         pool_stride_y = 0;
-    int         pad_x         = 0;
-    int         pad_y         = 0;
-    std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info().stride();
-    std::tie(pad_x, pad_y)                 = pool_info.pad_stride_info().pad();
-
-    const int w_in = static_cast<int>(in.shape()[0]);
-    const int h_in = static_cast<int>(in.shape()[1]);
-
-    const int w_out = static_cast<int>(out.shape()[0]);
-    const int h_out = static_cast<int>(out.shape()[1]);
-
-    int upper_dims = in.shape().total_size() / (w_in * h_in);
-
-    int pooled_w = 0;
-    int pooled_h = 0;
-    if(pool_info.pad_stride_info().round() == DimensionRoundingType::CEIL)
-    {
-        pooled_w = static_cast<int>(ceil(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
-        pooled_h = static_cast<int>(ceil(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
-    }
-    else
-    {
-        pooled_w = static_cast<int>(floor(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
-        pooled_h = static_cast<int>(floor(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
-    }
-
-    if((pooled_w - 1) * pool_stride_x >= w_in + pad_x)
-    {
-        --pooled_w;
-    }
-    if((pooled_h - 1) * pool_stride_y >= h_in + pad_y)
-    {
-        --pooled_h;
-    }
-
-    if(type == PoolingType::MAX)
-    {
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int h = 0; h < pooled_h; ++h)
-            {
-                for(int w = 0; w < pooled_w; ++w)
-                {
-                    int wstart = w * pool_stride_x - pad_x;
-                    int hstart = h * pool_stride_y - pad_y;
-                    int wend   = std::min(wstart + pool_size, w_in);
-                    int hend   = std::min(hstart + pool_size, h_in);
-                    wstart     = std::max(wstart, 0);
-                    hstart     = std::max(hstart, 0);
-
-                    T max_val = std::numeric_limits<T>::lowest();
-                    for(int y = hstart; y < hend; ++y)
-                    {
-                        for(int x = wstart; x < wend; ++x)
-                        {
-                            const T val = in[r * h_in * w_in + y * w_in + x];
-                            if(val > max_val)
-                            {
-                                max_val = val;
-                            }
-                        }
-                    }
-
-                    out[r * h_out * w_out + h * pooled_w + w] = max_val;
-                }
-            }
-        }
-    }
-    else // Average pooling
-    {
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int h = 0; h < pooled_h; ++h)
-            {
-                for(int w = 0; w < pooled_w; ++w)
-                {
-                    T   avg_val(0);
-                    int wstart = w * pool_stride_x - pad_x;
-                    int hstart = h * pool_stride_y - pad_y;
-                    int wend   = std::min(wstart + pool_size, w_in + pad_x);
-                    int hend   = std::min(hstart + pool_size, h_in + pad_y);
-                    int pool   = (hend - hstart) * (wend - wstart);
-                    wstart     = std::max(wstart, 0);
-                    hstart     = std::max(hstart, 0);
-                    wend       = std::min(wend, w_in);
-                    hend       = std::min(hend, h_in);
-
-                    for(int y = hstart; y < hend; ++y)
-                    {
-                        for(int x = wstart; x < wend; ++x)
-                        {
-                            avg_val += in[r * h_in * w_in + y * w_in + x];
-                        }
-                    }
-                    out[r * h_out * w_out + h * pooled_w + w] = avg_val / pool;
-                }
-            }
-        }
-    }
-}
-
-// Pooling layer
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
-void pooling_layer(const Tensor<T> &in, Tensor<T> &out, PoolingLayerInfo pool_info)
-{
-    const int   pool_size     = pool_info.pool_size();
-    PoolingType type          = pool_info.pool_type();
-    int         pool_stride_x = 0;
-    int         pool_stride_y = 0;
-    int         pad_x         = 0;
-    int         pad_y         = 0;
-    std::tie(pool_stride_x, pool_stride_y) = pool_info.pad_stride_info().stride();
-    std::tie(pad_x, pad_y)                 = pool_info.pad_stride_info().pad();
-
-    const int w_in = static_cast<int>(in.shape()[0]);
-    const int h_in = static_cast<int>(in.shape()[1]);
-
-    const int w_out = static_cast<int>(out.shape()[0]);
-    const int h_out = static_cast<int>(out.shape()[1]);
-
-    int upper_dims = in.shape().total_size() / (w_in * h_in);
-
-    int pooled_w = 0;
-    int pooled_h = 0;
-    if(pool_info.pad_stride_info().round() == DimensionRoundingType::CEIL)
-    {
-        pooled_w = static_cast<int>(ceil(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
-        pooled_h = static_cast<int>(ceil(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
-    }
-    else
-    {
-        pooled_w = static_cast<int>(floor(static_cast<float>(w_in + 2 * pad_x - pool_size) / pool_stride_x)) + 1;
-        pooled_h = static_cast<int>(floor(static_cast<float>(h_in + 2 * pad_y - pool_size) / pool_stride_y)) + 1;
-    }
-
-    if((pooled_w - 1) * pool_stride_x >= w_in + pad_x)
-    {
-        --pooled_w;
-    }
-    if((pooled_h - 1) * pool_stride_y >= h_in + pad_y)
-    {
-        --pooled_h;
-    }
-
-    if(type == PoolingType::MAX)
-    {
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int h = 0; h < pooled_h; ++h)
-            {
-                for(int w = 0; w < pooled_w; ++w)
-                {
-                    int wstart = w * pool_stride_x - pad_x;
-                    int hstart = h * pool_stride_y - pad_y;
-                    int wend   = std::min(wstart + pool_size, w_in);
-                    int hend   = std::min(hstart + pool_size, h_in);
-                    wstart     = std::max(wstart, 0);
-                    hstart     = std::max(hstart, 0);
-
-                    T max_val = std::numeric_limits<T>::lowest();
-                    for(int y = hstart; y < hend; ++y)
-                    {
-                        for(int x = wstart; x < wend; ++x)
-                        {
-                            T val = in[r * h_in * w_in + y * w_in + x];
-                            if(val > max_val)
-                            {
-                                max_val = val;
-                            }
-                        }
-                    }
-
-                    out[r * h_out * w_out + h * pooled_w + w] = max_val;
-                }
-            }
-        }
-    }
-    else // Average pooling
-    {
-        for(int r = 0; r < upper_dims; ++r)
-        {
-            for(int h = 0; h < pooled_h; ++h)
-            {
-                for(int w = 0; w < pooled_w; ++w)
-                {
-                    int wstart = w * pool_stride_x - pad_x;
-                    int hstart = h * pool_stride_y - pad_y;
-                    int wend   = std::min(wstart + pool_size, w_in + pad_x);
-                    int hend   = std::min(hstart + pool_size, h_in + pad_y);
-                    int pool   = (hend - hstart) * (wend - wstart);
-                    wstart     = std::max(wstart, 0);
-                    hstart     = std::max(hstart, 0);
-                    wend       = std::min(wend, w_in);
-                    hend       = std::min(hend, h_in);
-
-                    using namespace fixed_point_arithmetic;
-
-                    const int            fixed_point_position = in.fixed_point_position();
-                    const fixed_point<T> invpool_fp(1.f / static_cast<float>(pool), fixed_point_position);
-                    fixed_point<T>       avg_val(0, fixed_point_position, true);
-                    for(int y = hstart; y < hend; ++y)
-                    {
-                        for(int x = wstart; x < wend; ++x)
-                        {
-                            const fixed_point<T> in_fp(in[r * h_in * w_in + y * w_in + x], fixed_point_position, true);
-                            avg_val = add(avg_val, in_fp);
-                        }
-                    }
-                    out[r * h_out * w_out + h * pooled_w + w] = mul(avg_val, invpool_fp).raw();
-                }
-            }
-        }
-    }
-}
-
 // ROI Pooling layer
 template <typename T>
 void roi_pooling_layer(const Tensor<T> &in, Tensor<T> &out, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info)
diff --git a/tests/validation/TensorVisitors.h b/tests/validation/TensorVisitors.h
index 732cd0e..a15d2ad 100644
--- a/tests/validation/TensorVisitors.h
+++ b/tests/validation/TensorVisitors.h
@@ -233,27 +233,6 @@
     int                  _fixed_point_position;
 };
 
-// Pooling layer
-struct pooling_layer_visitor : public boost::static_visitor<>
-{
-public:
-    explicit pooling_layer_visitor(const TensorVariant &in, PoolingLayerInfo pool_info)
-        : _in(in), _pool_info(pool_info)
-    {
-    }
-
-    template <typename T>
-    void operator()(Tensor<T> &out) const
-    {
-        const Tensor<T> &in = boost::get<Tensor<T>>(_in);
-        tensor_operations::pooling_layer(in, out, _pool_info);
-    }
-
-private:
-    const TensorVariant &_in;
-    PoolingLayerInfo     _pool_info;
-};
-
 // ROI Pooling layer
 struct roi_pooling_layer_visitor : public boost::static_visitor<>
 {
diff --git a/tests/validation_new/CL/PoolingLayer.cpp b/tests/validation_new/CL/PoolingLayer.cpp
new file mode 100644
index 0000000..d38a3b2
--- /dev/null
+++ b/tests/validation_new/CL/PoolingLayer.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLPoolingLayer.h"
+#include "framework/Asserts.h"
+#include "framework/Macros.h"
+#include "framework/datasets/Datasets.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets_new/PoolingTypesDataset.h"
+#include "tests/datasets_new/ShapeDatasets.h"
+#include "tests/validation_new/Validation.h"
+#include "tests/validation_new/fixtures/PoolingLayerFixture.h"
+#include "tests/validation_new/half.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+/** Input data set for float data types */
+const auto PoolingLayerDatasetFP = combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { 2, 3, 7 })),
+                                           framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) }));
+
+/** Input data set for quantized data types */
+const auto PoolingLayerDatasetQS = combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { 2, 3 })),
+                                           framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) }));
+
+constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for float types */
+constexpr AbsoluteTolerance<float> tolerance_f16(0.01f);  /**< Tolerance value for comparing reference's output against implementation's output for half precision floating point types */
+constexpr AbsoluteTolerance<float> tolerance_qs8(3);      /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
+constexpr AbsoluteTolerance<float> tolerance_qs16(6);     /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(PoolingLayer)
+
+template <typename T>
+using CLPoolingLayerFixture = PoolingLayerValidationFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), combine(PoolingLayerDatasetFP, framework::dataset::make("DataType",
+                                                                                                    DataType::F32))))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP, framework::dataset::make("DataType",
+                                                                                                        DataType::F32))))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixture<half_float::half>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), combine(PoolingLayerDatasetFP,
+                                                                                                               framework::dataset::make("DataType", DataType::F16))))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixture<half_float::half>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP,
+                                                                                                                   framework::dataset::make("DataType", DataType::F16))))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+template <typename T>
+using CLPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLPoolingLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QS8)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS,
+                                                                                                                       framework::dataset::make("DataType", DataType::QS8))),
+                                                                                                               framework::dataset::make("FractionalBits", 1, 4)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qs8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetQS,
+                                                                                                                   framework::dataset::make("DataType", DataType::QS8))),
+                                                                                                                   framework::dataset::make("FractionalBits", 1, 4)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qs8);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(QS16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS,
+                                                                                                                        framework::dataset::make("DataType", DataType::QS16))),
+                                                                                                                framework::dataset::make("FractionalBits", 1, 12)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qs16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, CLPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetQS,
+                                                                                                                    framework::dataset::make("DataType", DataType::QS16))),
+                                                                                                                    framework::dataset::make("FractionalBits", 1, 12)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qs16);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation_new/CPP/PoolingLayer.cpp b/tests/validation_new/CPP/PoolingLayer.cpp
new file mode 100644
index 0000000..5464885
--- /dev/null
+++ b/tests/validation_new/CPP/PoolingLayer.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "PoolingLayer.h"
+
+#include "tests/validation_new/FixedPoint.h"
+#include "tests/validation_new/half.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+namespace
+{
+TensorShape calculate_output_shape(TensorShape shape, PoolingLayerInfo info)
+{
+    TensorShape dst_shape = shape;
+    const std::pair<unsigned int, unsigned int> scaled_dims = arm_compute::scaled_dimensions(shape.x(),
+                                                                                             shape.y(),
+                                                                                             info.pool_size(),
+                                                                                             info.pool_size(),
+                                                                                             info.pad_stride_info());
+    dst_shape.set(0, scaled_dims.first);
+    dst_shape.set(1, scaled_dims.second);
+
+    return dst_shape;
+}
+} // namespace
+
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type>
+SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
+{
+    const int   pool_size     = info.pool_size();
+    PoolingType type          = info.pool_type();
+    int         pool_stride_x = info.pad_stride_info().stride().first;
+    int         pool_stride_y = info.pad_stride_info().stride().second;
+    int         pad_x         = info.pad_stride_info().pad().first;
+    int         pad_y         = info.pad_stride_info().pad().second;
+
+    const auto w_src      = static_cast<int>(src.shape()[0]);
+    const auto h_src      = static_cast<int>(src.shape()[1]);
+    const int  upper_dims = src.shape().total_size() / (w_src * h_src);
+
+    // Create reference
+    SimpleTensor<T> dst{ calculate_output_shape(src.shape(), info), src.data_type(), 1, src.fixed_point_position() };
+
+    const auto w_dst = static_cast<int>(dst.shape()[0]);
+    const auto h_dst = static_cast<int>(dst.shape()[1]);
+
+    if(type == PoolingType::MAX)
+    {
+        for(int r = 0; r < upper_dims; ++r)
+        {
+            for(int h = 0; h < h_dst; ++h)
+            {
+                for(int w = 0; w < w_dst; ++w)
+                {
+                    int wstart = w * pool_stride_x - pad_x;
+                    int hstart = h * pool_stride_y - pad_y;
+                    int wend   = std::min(wstart + pool_size, w_src);
+                    int hend   = std::min(hstart + pool_size, h_src);
+                    wstart     = std::max(wstart, 0);
+                    hstart     = std::max(hstart, 0);
+
+                    T max_val = std::numeric_limits<T>::lowest();
+                    for(int y = hstart; y < hend; ++y)
+                    {
+                        for(int x = wstart; x < wend; ++x)
+                        {
+                            const T val = src[r * h_src * w_src + y * w_src + x];
+                            if(val > max_val)
+                            {
+                                max_val = val;
+                            }
+                        }
+                    }
+
+                    dst[r * h_dst * w_dst + h * w_dst + w] = max_val;
+                }
+            }
+        }
+    }
+    else // Average pooling
+    {
+        for(int r = 0; r < upper_dims; ++r)
+        {
+            for(int h = 0; h < h_dst; ++h)
+            {
+                for(int w = 0; w < w_dst; ++w)
+                {
+                    T   avg_val(0);
+                    int wstart = w * pool_stride_x - pad_x;
+                    int hstart = h * pool_stride_y - pad_y;
+                    int wend   = std::min(wstart + pool_size, w_src + pad_x);
+                    int hend   = std::min(hstart + pool_size, h_src + pad_y);
+                    int pool   = (hend - hstart) * (wend - wstart);
+                    wstart     = std::max(wstart, 0);
+                    hstart     = std::max(hstart, 0);
+                    wend       = std::min(wend, w_src);
+                    hend       = std::min(hend, h_src);
+
+                    for(int y = hstart; y < hend; ++y)
+                    {
+                        for(int x = wstart; x < wend; ++x)
+                        {
+                            avg_val += src[r * h_src * w_src + y * w_src + x];
+                        }
+                    }
+                    dst[r * h_dst * w_dst + h * w_dst + w] = avg_val / pool;
+                }
+            }
+        }
+    }
+
+    return dst;
+}
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type>
+SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info)
+{
+    const int   pool_size     = info.pool_size();
+    PoolingType type          = info.pool_type();
+    int         pool_stride_x = info.pad_stride_info().stride().first;
+    int         pool_stride_y = info.pad_stride_info().stride().second;
+    int         pad_x         = info.pad_stride_info().pad().first;
+    int         pad_y         = info.pad_stride_info().pad().second;
+
+    const auto w_src      = static_cast<int>(src.shape()[0]);
+    const auto h_src      = static_cast<int>(src.shape()[1]);
+    const int  upper_dims = src.shape().total_size() / (w_src * h_src);
+
+    // Create reference
+    SimpleTensor<T> dst{ calculate_output_shape(src.shape(), info), src.data_type(), 1, src.fixed_point_position() };
+
+    const auto w_dst = static_cast<int>(dst.shape()[0]);
+    const auto h_dst = static_cast<int>(dst.shape()[1]);
+
+    if(type == PoolingType::MAX)
+    {
+        for(int r = 0; r < upper_dims; ++r)
+        {
+            for(int h = 0; h < h_dst; ++h)
+            {
+                for(int w = 0; w < w_dst; ++w)
+                {
+                    int wstart = w * pool_stride_x - pad_x;
+                    int hstart = h * pool_stride_y - pad_y;
+                    int wend   = std::min(wstart + pool_size, w_src);
+                    int hend   = std::min(hstart + pool_size, h_src);
+                    wstart     = std::max(wstart, 0);
+                    hstart     = std::max(hstart, 0);
+
+                    T max_val = std::numeric_limits<T>::lowest();
+                    for(int y = hstart; y < hend; ++y)
+                    {
+                        for(int x = wstart; x < wend; ++x)
+                        {
+                            const T val = src[r * h_src * w_src + y * w_src + x];
+                            if(val > max_val)
+                            {
+                                max_val = val;
+                            }
+                        }
+                    }
+
+                    dst[r * h_dst * w_dst + h * w_dst + w] = max_val;
+                }
+            }
+        }
+    }
+    else // Average pooling
+    {
+        for(int r = 0; r < upper_dims; ++r)
+        {
+            for(int h = 0; h < h_dst; ++h)
+            {
+                for(int w = 0; w < w_dst; ++w)
+                {
+                    int wstart = w * pool_stride_x - pad_x;
+                    int hstart = h * pool_stride_y - pad_y;
+                    int wend   = std::min(wstart + pool_size, w_src + pad_x);
+                    int hend   = std::min(hstart + pool_size, h_src + pad_y);
+                    int pool   = (hend - hstart) * (wend - wstart);
+                    wstart     = std::max(wstart, 0);
+                    hstart     = std::max(hstart, 0);
+                    wend       = std::min(wend, w_src);
+                    hend       = std::min(hend, h_src);
+
+                    using namespace fixed_point_arithmetic;
+
+                    const int            fixed_point_position = src.fixed_point_position();
+                    const fixed_point<T> invpool_fp(1.f / static_cast<float>(pool), fixed_point_position);
+                    fixed_point<T>       avg_val(0, fixed_point_position, true);
+
+                    for(int y = hstart; y < hend; ++y)
+                    {
+                        for(int x = wstart; x < wend; ++x)
+                        {
+                            const fixed_point<T> in_fp(src[r * h_src * w_src + y * w_src + x], fixed_point_position, true);
+                            avg_val = add(avg_val, in_fp);
+                        }
+                    }
+                    dst[r * h_dst * w_dst + h * w_dst + w] = mul(avg_val, invpool_fp).raw();
+                }
+            }
+        }
+    }
+
+    return dst;
+}
+
+template SimpleTensor<float> pooling_layer(const SimpleTensor<float> &src, PoolingLayerInfo info);
+template SimpleTensor<half_float::half> pooling_layer(const SimpleTensor<half_float::half> &src, PoolingLayerInfo info);
+template SimpleTensor<qint8_t> pooling_layer(const SimpleTensor<qint8_t> &src, PoolingLayerInfo info);
+template SimpleTensor<qint16_t> pooling_layer(const SimpleTensor<qint16_t> &src, PoolingLayerInfo info);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation_new/CPP/PoolingLayer.h b/tests/validation_new/CPP/PoolingLayer.h
new file mode 100644
index 0000000..0935fb0
--- /dev/null
+++ b/tests/validation_new/CPP/PoolingLayer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_POOLING_LAYER_H__
+#define __ARM_COMPUTE_TEST_POOLING_LAYER_H__
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation_new/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type = 0>
+SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info);
+
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+SimpleTensor<T> pooling_layer(const SimpleTensor<T> &src, PoolingLayerInfo info);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_POOLING_LAYER_H__ */
diff --git a/tests/validation_new/NEON/PoolingLayer.cpp b/tests/validation_new/NEON/PoolingLayer.cpp
new file mode 100644
index 0000000..20fce3d
--- /dev/null
+++ b/tests/validation_new/NEON/PoolingLayer.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEPoolingLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "framework/Asserts.h"
+#include "framework/Macros.h"
+#include "framework/datasets/Datasets.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets_new/PoolingTypesDataset.h"
+#include "tests/datasets_new/ShapeDatasets.h"
+#include "tests/validation_new/Validation.h"
+#include "tests/validation_new/fixtures/PoolingLayerFixture.h"
+#include "tests/validation_new/half.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+/** Input data set for float data types */
+const auto PoolingLayerDatasetFP = combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { 2, 3, 7 })),
+                                           framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) }));
+
+/** Input data set for quantized data types */
+const auto PoolingLayerDatasetQS = combine(combine(datasets::PoolingTypes(), framework::dataset::make("PoolingSize", { 2, 3 })),
+                                           framework::dataset::make("PadStride", { PadStrideInfo(1, 1, 0, 0), PadStrideInfo(2, 1, 0, 0), PadStrideInfo(1, 2, 1, 1), PadStrideInfo(2, 2, 1, 0) }));
+
+constexpr AbsoluteTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for float types */
+#ifdef ARM_COMPUTE_ENABLE_FP16
+constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for half-precision (F16) types */
+#endif                                                   /* ARM_COMPUTE_ENABLE_FP16 */
+constexpr AbsoluteTolerance<float> tolerance_qs8(0);     /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
+constexpr AbsoluteTolerance<float> tolerance_qs16(0);    /**< Tolerance value for comparing reference's output against implementation's output for quantized input */
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(PoolingLayer)
+
+// TODO(COMPMID-415): Configuration tests?
+
+template <typename T>
+using NEPoolingLayerFixture = PoolingLayerValidationFixture<Tensor, Accessor, NEPoolingLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixture<float>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), combine(PoolingLayerDatasetFP, framework::dataset::make("DataType",
+                                                                                                    DataType::F32))))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEPoolingLayerFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP, framework::dataset::make("DataType",
+                                                                                                        DataType::F32))))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END()
+
+#ifdef ARM_COMPUTE_ENABLE_FP16
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixture<half_float::half>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), combine(PoolingLayerDatasetFP,
+                                                                                                               framework::dataset::make("DataType", DataType::F16))))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEPoolingLayerFixture<half_float::half>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), combine(PoolingLayerDatasetFP,
+                                                                                                                   framework::dataset::make("DataType", DataType::F16))))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END()
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+TEST_SUITE_END()
+
+template <typename T>
+using NEPoolingLayerFixedPointFixture = PoolingLayerValidationFixedPointFixture<Tensor, Accessor, NEPoolingLayer, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QS8)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS,
+                                                                                                                       framework::dataset::make("DataType", DataType::QS8))),
+                                                                                                               framework::dataset::make("FractionalBits", 1, 5)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qs8);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEPoolingLayerFixedPointFixture<int8_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetQS,
+                                                                                                                   framework::dataset::make("DataType", DataType::QS8))),
+                                                                                                                   framework::dataset::make("FractionalBits", 1, 5)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qs8);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(QS16)
+FIXTURE_DATA_TEST_CASE(RunSmall, NEPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::ALL, combine(combine(datasets::SmallShapes(), combine(PoolingLayerDatasetQS,
+                                                                                                                        framework::dataset::make("DataType", DataType::QS16))),
+                                                                                                                framework::dataset::make("FractionalBits", 1, 13)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qs16);
+}
+FIXTURE_DATA_TEST_CASE(RunLarge, NEPoolingLayerFixedPointFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeShapes(), combine(PoolingLayerDatasetQS,
+                                                                                                                    framework::dataset::make("DataType", DataType::QS16))),
+                                                                                                                    framework::dataset::make("FractionalBits", 1, 13)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qs16);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation_new/fixtures/PoolingLayerFixture.h b/tests/validation_new/fixtures/PoolingLayerFixture.h
new file mode 100644
index 0000000..c0c818f
--- /dev/null
+++ b/tests/validation_new/fixtures/PoolingLayerFixture.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_POOLING_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_POOLING_LAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "framework/Asserts.h"
+#include "framework/Fixture.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/validation_new/CPP/PoolingLayer.h"
+
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class PoolingLayerValidationFixedPointFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, DataType data_type, int fractional_bits)
+    {
+        _fractional_bits = fractional_bits;
+        PoolingLayerInfo info(pool_type, pool_size, pad_stride_info);
+
+        _target    = compute_target(shape, info, data_type, fractional_bits);
+        _reference = compute_reference(shape, info, data_type, fractional_bits);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor)
+    {
+        if(_fractional_bits == 0)
+        {
+            std::uniform_real_distribution<> distribution(-1.f, 1.f);
+            library->fill(tensor, distribution, 0);
+        }
+        else
+        {
+            const int                       one_fixed = 1 << _fractional_bits;
+            std::uniform_int_distribution<> distribution(-one_fixed, one_fixed);
+            library->fill(tensor, distribution, 0);
+        }
+    }
+
+    TensorType compute_target(const TensorShape &shape, PoolingLayerInfo info, DataType data_type, int fixed_point_position = 0)
+    {
+        // Create tensors
+        TensorType src = create_tensor<TensorType>(shape, data_type, 1, fixed_point_position);
+        TensorType dst;
+
+        // Create and configure function
+        FunctionType pool_layer;
+        pool_layer.configure(&src, &dst, info);
+
+        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        src.allocator()->allocate();
+        dst.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(src));
+
+        // Compute function
+        pool_layer.run();
+
+        return dst;
+    }
+
+    SimpleTensor<T> compute_reference(const TensorShape &shape, PoolingLayerInfo info, DataType data_type, int fixed_point_position = 0)
+    {
+        // Create reference
+        SimpleTensor<T> src{ shape, data_type, 1, fixed_point_position };
+
+        // Fill reference
+        fill(src);
+
+        return reference::pooling_layer<T>(src, info);
+    }
+
+    TensorType      _target{};
+    SimpleTensor<T> _reference{};
+    int             _fractional_bits{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class PoolingLayerValidationFixture : public PoolingLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape, PoolingType pool_type, int pool_size, PadStrideInfo pad_stride_info, DataType data_type)
+    {
+        PoolingLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape, pool_type, pool_size, pad_stride_info, data_type, 0);
+    }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_POOLING_LAYER_FIXTURE */