COMPMID-577: Implement CL validation for GaussianPyramid
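
Remove the border_undefined parameter from the CL Gaussian pyramid
kernels: CLGaussianPyramidHalf now always fills borders, adding a
second CLFillBorderKernel that fills the intermediate U16 tensor before
the vertical pass, and each kernel marks its whole output as valid. The
new CL validation mirrors the NEON one, whose fp32 tolerance is dropped.

A minimal sketch of the function-level API the new tests exercise
(shape, level count and border mode are illustrative; create_tensor is
the test-framework helper):

    // Build a 3-level half-scale Gaussian pyramid from a U8 source.
    CLTensor src = create_tensor<CLTensor>(TensorShape(64U, 64U), DataType::U8);

    PyramidInfo pyramid_info(3, SCALE_PYRAMID_HALF, src.info()->tensor_shape(), Format::U8);
    CLPyramid   dst;
    dst.init(pyramid_info);

    CLGaussianPyramidHalf gaussian_pyramid_half;
    gaussian_pyramid_half.configure(&src, &dst, BorderMode::CONSTANT, 0);
    // Allocate src and the pyramid levels, then call gaussian_pyramid_half.run().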

Change-Id: If879cbe15b14d97818c24d44b29fc69b6c8cb686
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/127601
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/arm_compute/core/CL/kernels/CLGaussianPyramidKernel.h b/arm_compute/core/CL/kernels/CLGaussianPyramidKernel.h
index 43d66c3..425f847 100644
--- a/arm_compute/core/CL/kernels/CLGaussianPyramidKernel.h
+++ b/arm_compute/core/CL/kernels/CLGaussianPyramidKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -49,19 +49,17 @@
 
-    /** Initialise the kernel's source, destination and border mode.
+    /** Initialise the kernel's source and destination.
      *
-     * @param[in]  input            Source tensor. Data types supported: U8.
-     * @param[out] output           Destination tensor. Output should have half the input width. Data types supported: U16.
-     * @param[in]  border_undefined True if the border mode is undefined. False if it's replicate or constant.
+     * @param[in]  input  Source tensor. Data types supported: U8.
+     * @param[out] output Destination tensor. Output should have half the input width. Data types supported: U16.
      */
-    void configure(const ICLTensor *input, ICLTensor *output, bool border_undefined);
+    void configure(const ICLTensor *input, ICLTensor *output);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
     BorderSize border_size() const override;
 
 private:
-    BorderSize _border_size;
-    int        _l2_load_offset;
+    int _l2_load_offset;
 };
 
 /** OpenCL kernel to perform a Gaussian filter and half scaling across height (vertical pass) */
@@ -83,11 +81,10 @@
 
-    /** Initialise the kernel's source, destination and border mode.
+    /** Initialise the kernel's source and destination.
      *
-     * @param[in]  input            Source tensor. Data types supported: U16.
-     * @param[out] output           Destination tensor. Output should have half the input height. Data types supported: U8.
-     * @param[in]  border_undefined True if the border mode is undefined. False if it's replicate or constant.
+     * @param[in]  input  Source tensor. Data types supported: U16.
+     * @param[out] output Destination tensor. Output should have half the input height. Data types supported: U8.
      */
-    void configure(const ICLTensor *input, ICLTensor *output, bool border_undefined);
+    void configure(const ICLTensor *input, ICLTensor *output);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/NEON/kernels/NEGaussianPyramidKernel.h b/arm_compute/core/NEON/kernels/NEGaussianPyramidKernel.h
index ac22934..1446ca8 100644
--- a/arm_compute/core/NEON/kernels/NEGaussianPyramidKernel.h
+++ b/arm_compute/core/NEON/kernels/NEGaussianPyramidKernel.h
@@ -54,7 +54,7 @@
-    /** Initialise the kernel's source, destination and border mode.
+    /** Initialise the kernel's source and destination.
      *
      * @param[in]  input  Source tensor. Data type supported: U8.
-     * @param[out] output Destination tensor. Data type supported: S16.
+     * @param[out] output Destination tensor. Output should have half the input width. Data type supported: S16.
      */
     void configure(const ITensor *input, ITensor *output);
 
@@ -90,7 +90,7 @@
-    /** Initialise the kernel's source, destination and border mode.
+    /** Initialise the kernel's source and destination.
      *
      * @param[in]  input  Source tensor. Data type supported: S16.
-     * @param[out] output Destination tensor. Data type supported: U8.
+     * @param[out] output Destination tensor. Output should have half the input height. Data type supported: U8.
      */
     void configure(const ITensor *input, ITensor *output);
 
diff --git a/arm_compute/runtime/CL/functions/CLGaussianPyramid.h b/arm_compute/runtime/CL/functions/CLGaussianPyramid.h
index 9793519..0110adf 100644
--- a/arm_compute/runtime/CL/functions/CLGaussianPyramid.h
+++ b/arm_compute/runtime/CL/functions/CLGaussianPyramid.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -90,7 +90,8 @@
     void run() override;
 
 private:
-    std::unique_ptr<CLFillBorderKernel[]>          _border_handler;
+    std::unique_ptr<CLFillBorderKernel[]>          _horizontal_border_handler;
+    std::unique_ptr<CLFillBorderKernel[]>          _vertical_border_handler;
     std::unique_ptr<CLGaussianPyramidHorKernel[]>  _horizontal_reduction;
     std::unique_ptr<CLGaussianPyramidVertKernel[]> _vertical_reduction;
 };
diff --git a/src/core/CL/kernels/CLGaussianPyramidKernel.cpp b/src/core/CL/kernels/CLGaussianPyramidKernel.cpp
index 34a228c..a4fda36 100644
--- a/src/core/CL/kernels/CLGaussianPyramidKernel.cpp
+++ b/src/core/CL/kernels/CLGaussianPyramidKernel.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -32,20 +32,19 @@
 using namespace arm_compute;
 
 CLGaussianPyramidHorKernel::CLGaussianPyramidHorKernel()
-    : _border_size(0), _l2_load_offset(0)
+    : _l2_load_offset(0)
 {
 }
 
 BorderSize CLGaussianPyramidHorKernel::border_size() const
 {
-    return _border_size;
+    return BorderSize(0, 2);
 }
 
-void CLGaussianPyramidHorKernel::configure(const ICLTensor *input, ICLTensor *output, bool border_undefined)
+void CLGaussianPyramidHorKernel::configure(const ICLTensor *input, ICLTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U8);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U16);
-    ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != 2 * output->info()->dimension(0));
     ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) != output->info()->dimension(1));
 
     for(size_t i = 2; i < Coordinates::num_max_dimensions; ++i)
@@ -53,9 +52,8 @@
         ARM_COMPUTE_ERROR_ON(input->info()->dimension(i) != output->info()->dimension(i));
     }
 
-    _input       = input;
-    _output      = output;
-    _border_size = BorderSize(border_undefined ? 0 : 2, 2);
+    _input  = input;
+    _output = output;
 
     // Create kernel
     _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("gaussian1x5_sub_x"));
@@ -64,9 +62,9 @@
     constexpr unsigned int num_elems_processed_per_iteration = 16;
     constexpr unsigned int num_elems_read_per_iteration      = 20;
     constexpr unsigned int num_elems_written_per_iteration   = 8;
-    constexpr float        scale_x                           = 0.5f;
+    const float            scale_x                           = static_cast<float>(output->info()->dimension(0)) / input->info()->dimension(0);
 
-    Window                 win = calculate_max_window_horizontal(*input->info(), Steps(num_elems_processed_per_iteration), border_undefined, border_size());
+    Window                 win = calculate_max_window_horizontal(*input->info(), Steps(num_elems_processed_per_iteration));
     AccessWindowHorizontal output_access(output->info(), 0, num_elems_written_per_iteration, scale_x);
 
     // Sub sampling selects odd pixels (1, 3, 5, ...) for images with even
@@ -95,11 +93,7 @@
                               AccessWindowHorizontal(input->info(), _l2_load_offset, num_elems_read_per_iteration),
                               output_access);
 
-    ValidRegion valid_region = input->info()->valid_region();
-    valid_region.anchor.set(0, std::ceil((valid_region.anchor[0] + (border_undefined ? border_size().left : 0)) / 2.f));
-    valid_region.shape.set(0, (valid_region.shape[0] - (border_undefined ? border_size().right : 0)) / 2 - valid_region.anchor[0]);
-
-    output_access.set_valid_region(win, valid_region);
+    output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
 
     ICLKernel::configure(win);
 }
@@ -139,12 +133,11 @@
     return BorderSize(2, 0);
 }
 
-void CLGaussianPyramidVertKernel::configure(const ICLTensor *input, ICLTensor *output, bool border_undefined)
+void CLGaussianPyramidVertKernel::configure(const ICLTensor *input, ICLTensor *output)
 {
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::U16);
     ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::U8);
     ARM_COMPUTE_ERROR_ON(input->info()->dimension(0) != output->info()->dimension(0));
-    ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) != 2 * output->info()->dimension(1));
 
     for(size_t i = 2; i < Coordinates::num_max_dimensions; ++i)
     {
@@ -163,10 +156,10 @@
     constexpr unsigned int num_elems_written_per_iteration   = 8;
     constexpr unsigned int num_elems_read_per_iteration      = 8;
     constexpr unsigned int num_rows_per_iteration            = 5;
-    constexpr float        scale_y                           = 0.5f;
 
-    Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration, num_rows_processed_per_iteration),
-                                      border_undefined, border_size());
+    const float scale_y = static_cast<float>(output->info()->dimension(1)) / input->info()->dimension(1);
+
+    Window                win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration, num_rows_processed_per_iteration));
     AccessWindowRectangle output_access(output->info(), 0, 0, num_elems_written_per_iteration, num_rows_per_iteration, 1.f, scale_y);
 
     // Determine whether we need to load even or odd rows. See above for a
@@ -182,11 +175,7 @@
                               AccessWindowRectangle(input->info(), 0, _t2_load_offset, num_elems_read_per_iteration, num_rows_per_iteration),
                               output_access);
 
-    ValidRegion valid_region = input->info()->valid_region();
-    valid_region.anchor.set(1, std::ceil((valid_region.anchor[1] + (border_undefined ? border_size().top : 0)) / 2.f));
-    valid_region.shape.set(1, (valid_region.shape[1] - (border_undefined ? border_size().bottom : 0)) / 2 - valid_region.anchor[1]);
-
-    output_access.set_valid_region(win, valid_region);
+    output->info()->set_valid_region(ValidRegion(Coordinates(), output->info()->tensor_shape()));
 
     ICLKernel::configure(win);
 }
diff --git a/src/runtime/CL/functions/CLGaussianPyramid.cpp b/src/runtime/CL/functions/CLGaussianPyramid.cpp
index 4b32954..ddce5fb 100644
--- a/src/runtime/CL/functions/CLGaussianPyramid.cpp
+++ b/src/runtime/CL/functions/CLGaussianPyramid.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -49,7 +49,8 @@
 }
 
 CLGaussianPyramidHalf::CLGaussianPyramidHalf() // NOLINT
-    : _border_handler(),
+    : _horizontal_border_handler(),
+      _vertical_border_handler(),
       _horizontal_reduction(),
       _vertical_reduction()
 {
@@ -64,6 +65,9 @@
     ARM_COMPUTE_ERROR_ON(input->info()->dimension(1) != pyramid->info()->height());
     ARM_COMPUTE_ERROR_ON(SCALE_PYRAMID_HALF != pyramid->info()->scale());
 
+    // Constant value to use for vertical fill border when the border mode is CONSTANT: the weights 2, 8 and 6 fold the symmetric taps of the 5-tap Gaussian (1 + 4 + 6 + 4 + 1 = 16), so the fill value matches what the horizontal pass produces from a constant border
+    const uint16_t pixel_value_u16 = static_cast<uint16_t>(constant_border_value) * 2 + static_cast<uint16_t>(constant_border_value) * 8 + static_cast<uint16_t>(constant_border_value) * 6;
+
     /* Get number of pyramid levels */
     const size_t num_levels = pyramid->info()->num_levels();
 
@@ -72,28 +76,31 @@
 
     if(num_levels > 1)
     {
-        _border_handler       = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(num_levels - 1);
-        _horizontal_reduction = arm_compute::support::cpp14::make_unique<CLGaussianPyramidHorKernel[]>(num_levels - 1);
-        _vertical_reduction   = arm_compute::support::cpp14::make_unique<CLGaussianPyramidVertKernel[]>(num_levels - 1);
+        _horizontal_border_handler = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(num_levels - 1);
+        _vertical_border_handler   = arm_compute::support::cpp14::make_unique<CLFillBorderKernel[]>(num_levels - 1);
+        _horizontal_reduction      = arm_compute::support::cpp14::make_unique<CLGaussianPyramidHorKernel[]>(num_levels - 1);
+        _vertical_reduction        = arm_compute::support::cpp14::make_unique<CLGaussianPyramidVertKernel[]>(num_levels - 1);
 
         // Apply half scale to the X dimension of the tensor shape
         TensorShape tensor_shape = pyramid->info()->tensor_shape();
         tensor_shape.set(0, (pyramid->info()->width() + 1) * SCALE_PYRAMID_HALF);
 
         PyramidInfo pyramid_info(num_levels - 1, SCALE_PYRAMID_HALF, tensor_shape, Format::U16);
-
         _tmp.init(pyramid_info);
 
         for(size_t i = 0; i < num_levels - 1; ++i)
         {
             /* Configure horizontal kernel */
-            _horizontal_reduction[i].configure(_pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i), border_mode == BorderMode::UNDEFINED);
+            _horizontal_reduction[i].configure(_pyramid->get_pyramid_level(i), _tmp.get_pyramid_level(i));
 
             /* Configure vertical kernel */
-            _vertical_reduction[i].configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1), border_mode == BorderMode::UNDEFINED);
+            _vertical_reduction[i].configure(_tmp.get_pyramid_level(i), _pyramid->get_pyramid_level(i + 1));
 
-            /* Configure border */
+            /* Configure horizontal border */
-            _border_handler[i].configure(_pyramid->get_pyramid_level(i), _horizontal_reduction[i].border_size(), border_mode, PixelValue(constant_border_value));
+            _horizontal_border_handler[i].configure(_pyramid->get_pyramid_level(i), _horizontal_reduction[i].border_size(), border_mode, PixelValue(constant_border_value));
+
+            /* Configure vertical border */
+            _vertical_border_handler[i].configure(_tmp.get_pyramid_level(i), _vertical_reduction[i].border_size(), border_mode, PixelValue(pixel_value_u16));
         }
         _tmp.allocate();
     }
@@ -110,13 +117,15 @@
     _pyramid->get_pyramid_level(0)->map(CLScheduler::get().queue(), true /* blocking */);
     _input->map(CLScheduler::get().queue(), true /* blocking */);
     _pyramid->get_pyramid_level(0)->copy_from(*_input);
+
     _input->unmap(CLScheduler::get().queue());
     _pyramid->get_pyramid_level(0)->unmap(CLScheduler::get().queue());
 
     for(unsigned int i = 0; i < num_levels - 1; ++i)
     {
-        CLScheduler::get().enqueue(_border_handler[i], false);
+        CLScheduler::get().enqueue(_horizontal_border_handler[i], false);
         CLScheduler::get().enqueue(_horizontal_reduction[i], false);
+        CLScheduler::get().enqueue(_vertical_border_handler[i], false);
         CLScheduler::get().enqueue(_vertical_reduction[i], false);
     }
 }
diff --git a/tests/validation/CL/GaussianPyramid.cpp b/tests/validation/CL/GaussianPyramid.cpp
new file mode 100644
index 0000000..2a4596d
--- /dev/null
+++ b/tests/validation/CL/GaussianPyramid.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLGaussianPyramid.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/BorderModeDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/GaussianPyramidHalfFixture.h"
+#include "tests/validation/reference/Utils.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+const auto small_gaussian_pyramid_levels = combine(datasets::Medium2DShapes(), datasets::BorderModes()) * framework::dataset::make("numlevels", 2, 4);
+const auto large_gaussian_pyramid_levels = combine(datasets::Large2DShapes(), datasets::BorderModes()) * framework::dataset::make("numlevels", 2, 5);
+
+template <typename T>
+inline void validate_gaussian_pyramid(const CLPyramid &target, const std::vector<SimpleTensor<T>> &reference, BorderMode border_mode)
+{
+    ValidRegion prev_valid_region = shape_to_valid_region(reference[0].shape());
+
+    for(size_t i = 1; i < reference.size(); ++i)
+    {
+        const ValidRegion valid_region = shape_to_valid_region_gaussian_pyramid_half(reference[i - 1].shape(), prev_valid_region, (border_mode == BorderMode::UNDEFINED));
+
+        // Validate outputs
+        validate(CLAccessor(*(target.get_pyramid_level(i))), reference[i], valid_region);
+
+        // Keep the valid region for the next level
+        prev_valid_region = valid_region;
+    }
+}
+
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(GaussianPyramid)
+TEST_SUITE(Half)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, large_gaussian_pyramid_levels,
+               shape, border_mode, num_levels)
+{
+    CLTensor src = create_tensor<CLTensor>(shape, DataType::U8);
+
+    // Create pyramid
+    PyramidInfo pyramid_info(num_levels, SCALE_PYRAMID_HALF, shape, Format::U8);
+    CLPyramid   dst;
+    dst.init(pyramid_info);
+
+    CLGaussianPyramidHalf gaussian_pyramid_half;
+    gaussian_pyramid_half.configure(&src, &dst, border_mode, 0);
+
+    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    for(size_t level = 0; level < pyramid_info.num_levels(); ++level)
+    {
+        ARM_COMPUTE_EXPECT(dst.get_pyramid_level(level)->info()->is_resizable(), framework::LogLevel::ERRORS);
+    }
+}
+
+template <typename T>
+using CLGaussianPyramidHalfFixture = GaussianPyramidHalfValidationFixture<CLTensor, CLAccessor, CLGaussianPyramidHalf, T, CLPyramid>;
+
+FIXTURE_DATA_TEST_CASE(RunSmallGaussianPyramidHalf, CLGaussianPyramidHalfFixture<uint8_t>, framework::DatasetMode::ALL, small_gaussian_pyramid_levels)
+{
+    validate_gaussian_pyramid(_target, _reference, _border_mode);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLargeGaussianPyramidHalf, CLGaussianPyramidHalfFixture<uint8_t>, framework::DatasetMode::NIGHTLY, large_gaussian_pyramid_levels)
+{
+    validate_gaussian_pyramid(_target, _reference, _border_mode);
+}
+TEST_SUITE_END()
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/NEON/GaussianPyramid.cpp b/tests/validation/NEON/GaussianPyramid.cpp
index 0dea57e..c646b50 100644
--- a/tests/validation/NEON/GaussianPyramid.cpp
+++ b/tests/validation/NEON/GaussianPyramid.cpp
@@ -44,13 +44,11 @@
 {
 namespace
 {
-constexpr AbsoluteTolerance<float> tolerance_fp32(1.0f); /**< Tolerance value for comparing reference's output against implementation's output */
-
 const auto small_gaussian_pyramid_levels = combine(datasets::Medium2DShapes(), datasets::BorderModes()) * framework::dataset::make("numlevels", 2, 4);
 const auto large_gaussian_pyramid_levels = combine(datasets::Large2DShapes(), datasets::BorderModes()) * framework::dataset::make("numlevels", 2, 5);
 
-template <typename T, typename U>
-inline void validate_gaussian_pyramid(const Pyramid &target, const std::vector<SimpleTensor<T>> &reference, BorderMode border_mode, U tolerance, float tolerance_number = 0.0f)
+template <typename T>
+inline void validate_gaussian_pyramid(const Pyramid &target, const std::vector<SimpleTensor<T>> &reference, BorderMode border_mode)
 {
     ValidRegion prev_valid_region = shape_to_valid_region(reference[0].shape());
 
@@ -59,7 +57,7 @@
         const ValidRegion valid_region = shape_to_valid_region_gaussian_pyramid_half(reference[i - 1].shape(), prev_valid_region, (border_mode == BorderMode::UNDEFINED));
 
         // Validate outputs
-        validate(Accessor(*(target.get_pyramid_level(i))), reference[i], valid_region, tolerance, tolerance_number);
+        validate(Accessor(*(target.get_pyramid_level(i))), reference[i], valid_region);
 
         // Keep the valid region for the next level
         prev_valid_region = valid_region;
@@ -97,12 +95,12 @@
 
 FIXTURE_DATA_TEST_CASE(RunSmallGaussianPyramidHalf, NEGaussianPyramidHalfFixture<uint8_t>, framework::DatasetMode::ALL, small_gaussian_pyramid_levels)
 {
-    validate_gaussian_pyramid(_target, _reference, _border_mode, tolerance_fp32);
+    validate_gaussian_pyramid(_target, _reference, _border_mode);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLargeGaussianPyramidHalf, NEGaussianPyramidHalfFixture<uint8_t>, framework::DatasetMode::NIGHTLY, large_gaussian_pyramid_levels)
 {
-    validate_gaussian_pyramid(_target, _reference, _border_mode, tolerance_fp32);
+    validate_gaussian_pyramid(_target, _reference, _border_mode);
 }
 TEST_SUITE_END()
 TEST_SUITE_END()
diff --git a/tests/validation/reference/GaussianPyramidHalf.cpp b/tests/validation/reference/GaussianPyramidHalf.cpp
index 0a68ded..7d5eb07 100644
--- a/tests/validation/reference/GaussianPyramidHalf.cpp
+++ b/tests/validation/reference/GaussianPyramidHalf.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017 ARM Limited.
+ * Copyright (c) 2017-2018 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *