COMPMID-494: Port BatchNormalizationLayer to new validation
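
Port the CL and NEON BatchNormalizationLayer validation tests to the new
framework: add a random shape dataset, a shared fixture and a CPP reference
implementation, and remove the old Boost-based tests. Per channel i the
reference computes

    dst = beta[i] + gamma[i] * (src - mean[i]) / sqrt(var[i] + epsilon)

for F32/F16, with an equivalent fixed-point path (QS8/QS16) built on inv_sqrt.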

Change-Id: Ief5334dd1cf571d977acf4ce9e5f580c5c9ab433
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/88158
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
diff --git a/tests/datasets/RandomBatchNormalizationLayerDataset.h b/tests/datasets/RandomBatchNormalizationLayerDataset.h
new file mode 100644
index 0000000..f4c61e0
--- /dev/null
+++ b/tests/datasets/RandomBatchNormalizationLayerDataset.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_RANDOM_BATCH_NORMALIZATION_LAYER_DATASET
+#define ARM_COMPUTE_TEST_RANDOM_BATCH_NORMALIZATION_LAYER_DATASET
+
+#include "tests/datasets/BatchNormalizationLayerDataset.h"
+
+#include "tests/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class RandomBatchNormalizationLayerDataset final : public BatchNormalizationLayerDataset
+{
+public:
+    RandomBatchNormalizationLayerDataset()
+    {
+        add_config(TensorShape(15U, 16U, 2U, 12U), TensorShape(2U), 0.1f);
+        add_config(TensorShape(21U, 11U, 12U, 7U), TensorShape(12U), 0.1f);
+        add_config(TensorShape(7U, 3U, 6U, 11U), TensorShape(6U), 0.1f);
+    }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_RANDOM_BATCH_NORMALIZATION_LAYER_DATASET */
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
new file mode 100644
index 0000000..ac30c63
--- /dev/null
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/CLTensorAllocator.h"
+#include "arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/RandomBatchNormalizationLayerDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/BatchNormalizationLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+constexpr AbsoluteTolerance<float> tolerance_f(0.00001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+constexpr AbsoluteTolerance<float> tolerance_qs8(3.0f);   /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */
+constexpr AbsoluteTolerance<float> tolerance_qs16(6.0f);  /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS16 */
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(BatchNormalizationLayer)
+
+template <typename T>
+using CLBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::RandomBatchNormalizationLayerDataset(), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F32 })),
+               shape0, shape1, epsilon, dt)
+{
+    // Set the fixed point position for fixed point data types
+    int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
+
+    // Create tensors
+    CLTensor src   = create_tensor<CLTensor>(shape0, dt, 1, fixed_point_position);
+    CLTensor dst   = create_tensor<CLTensor>(shape0, dt, 1, fixed_point_position);
+    CLTensor mean  = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
+    CLTensor var   = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
+    CLTensor beta  = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
+    CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
+
+    // Create and configure function
+    CLBatchNormalizationLayer norm;
+    norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape0);
+    validate(dst.info()->valid_region(), valid_region);
+}
+
+TEST_SUITE(Float)
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                                                                                   framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f, 0);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(Quantized)
+template <typename T>
+using CLBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;
+
+TEST_SUITE(QS8)
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                       framework::dataset::make("DataType", DataType::QS8)),
+                       framework::dataset::make("FractionalBits", 1, 6)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qs8, 0);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(QS16)
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                       framework::dataset::make("DataType", DataType::QS16)),
+                       framework::dataset::make("FractionalBits", 1, 14)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_qs16, 0);
+}
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/CPP/BatchNormalizationLayer.cpp b/tests/validation/CPP/BatchNormalizationLayer.cpp
new file mode 100644
index 0000000..37e2d55
--- /dev/null
+++ b/tests/validation/CPP/BatchNormalizationLayer.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "BatchNormalizationLayer.h"
+
+#include "tests/validation/FixedPoint.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+// Batch Normalization Layer for fixed point type
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type *>
+SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
+                                          int fixed_point_position)
+{
+    SimpleTensor<T> result(src.shape(), src.data_type());
+
+    const auto cols       = static_cast<int>(src.shape()[0]);
+    const auto rows       = static_cast<int>(src.shape()[1]);
+    const auto depth      = static_cast<int>(src.shape()[2]);
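+    // Dimensions beyond width/height/depth (e.g. the batch) are collapsed into a single loop count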
+    int        upper_dims = src.shape().total_size() / (cols * rows * depth);
+
+    for(int r = 0; r < upper_dims; ++r)
+    {
+        for(int i = 0; i < depth; ++i)
+        {
+            for(int k = 0; k < rows; ++k)
+            {
+                for(int l = 0; l < cols; ++l)
+                {
+                    const int pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
+
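+                    // Wrap the raw QSn values in fixed_point<T> (the trailing 'true' flags them as
+                    // raw, already-scaled values); epsilon is converted from float instead.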
+                    fixed_point_arithmetic::fixed_point<T> src_qs(src[pos], fixed_point_position, true);
+                    fixed_point_arithmetic::fixed_point<T> var_qs(var[i], fixed_point_position, true);
+                    fixed_point_arithmetic::fixed_point<T> mean_qs(mean[i], fixed_point_position, true);
+                    fixed_point_arithmetic::fixed_point<T> beta_qs(beta[i], fixed_point_position, true);
+                    fixed_point_arithmetic::fixed_point<T> gamma_qs(gamma[i], fixed_point_position, true);
+                    fixed_point_arithmetic::fixed_point<T> epsilon_qs(epsilon, fixed_point_position);
+
+                    auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs + epsilon_qs);
+                    auto numerator   = src_qs - mean_qs;
+                    auto x_bar       = numerator * denominator;
+                    x_bar            = beta_qs + x_bar * gamma_qs;
+                    result[pos]      = x_bar.raw();
+                }
+            }
+        }
+    }
+
+    return result;
+}
+
+// Batch Normalization Layer for floating point type
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type *>
+SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
+                                          int fixed_point_position)
+{
+    ARM_COMPUTE_UNUSED(fixed_point_position);
+
+    SimpleTensor<T> result(src.shape(), src.data_type());
+
+    const auto cols       = static_cast<int>(src.shape()[0]);
+    const auto rows       = static_cast<int>(src.shape()[1]);
+    const auto depth      = static_cast<int>(src.shape()[2]);
+    int        upper_dims = src.shape().total_size() / (cols * rows * depth);
+
+    for(int r = 0; r < upper_dims; ++r)
+    {
+        for(int i = 0; i < depth; ++i)
+        {
+            for(int k = 0; k < rows; ++k)
+            {
+                for(int l = 0; l < cols; ++l)
+                {
+                    const int   pos         = l + k * cols + i * rows * cols + r * cols * rows * depth;
+                    const float denominator = sqrt(var[i] + epsilon);
+                    const float numerator   = src[pos] - mean[i];
+                    const float x_bar       = numerator / denominator;
+                    result[pos]             = beta[i] + x_bar * gamma[i];
+                }
+            }
+        }
+    }
+    return result;
+}
+template SimpleTensor<float> batch_normalization_layer(const SimpleTensor<float> &src, const SimpleTensor<float> &mean, const SimpleTensor<float> &var, const SimpleTensor<float> &beta,
+                                                       const SimpleTensor<float> &gamma, float epsilon, int fixed_point_position);
+template SimpleTensor<int8_t> batch_normalization_layer(const SimpleTensor<int8_t> &src, const SimpleTensor<int8_t> &mean, const SimpleTensor<int8_t> &var, const SimpleTensor<int8_t> &beta,
+                                                        const SimpleTensor<int8_t> &gamma, float epsilon, int fixed_point_position);
+template SimpleTensor<int16_t> batch_normalization_layer(const SimpleTensor<int16_t> &src, const SimpleTensor<int16_t> &mean, const SimpleTensor<int16_t> &var, const SimpleTensor<int16_t> &beta,
+                                                         const SimpleTensor<int16_t> &gamma, float epsilon, int fixed_point_position);
+template SimpleTensor<half> batch_normalization_layer(const SimpleTensor<half> &src, const SimpleTensor<half> &mean, const SimpleTensor<half> &var,
+                                                      const SimpleTensor<half> &beta,
+                                                      const SimpleTensor<half> &gamma, float epsilon, int fixed_point_position);
+
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/CPP/BatchNormalizationLayer.h b/tests/validation/CPP/BatchNormalizationLayer.h
new file mode 100644
index 0000000..1a554ad
--- /dev/null
+++ b/tests/validation/CPP/BatchNormalizationLayer.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_H__
+#define __ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_H__
+
+#include "tests/SimpleTensor.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace reference
+{
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
+SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
+                                          int fixed_point_position);
+
+template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
+SimpleTensor<T> batch_normalization_layer(const SimpleTensor<T> &src, const SimpleTensor<T> &mean, const SimpleTensor<T> &var, const SimpleTensor<T> &beta, const SimpleTensor<T> &gamma, float epsilon,
+                                          int fixed_point_position);
+} // namespace reference
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_H__ */
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 85002eb..30c6724 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -202,6 +202,31 @@
         table[i] = distribution(generator);
     }
 }
+
+/** Helper function to get the testing range for batch normalization layer.
+ *
+ * @param[in] fixed_point_position (Optional) Number of bits for the fractional part. Defaults to 1.
+ *
+ * @return A pair containing the lower and upper testing bounds.
+ */
+template <typename T>
+std::pair<T, T> get_batchnormalization_layer_test_bounds(int fixed_point_position = 1)
+{
+    bool is_float = std::is_floating_point<T>::value;
+    std::pair<T, T> bounds;
+
+    // Set initial values
+    if(is_float)
+    {
+        bounds = std::make_pair(-1.f, 1.f);
+    }
+    else
+    {
+        bounds = std::make_pair(1, 1 << (fixed_point_position));
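+        // e.g. fixed_point_position = 4 gives [1, 16], i.e. real values from 1/16 up to 1.0 in Q4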
+    }
+
+    return bounds;
+}
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
new file mode 100644
index 0000000..9ca26eb
--- /dev/null
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/NEON/Accessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/RandomBatchNormalizationLayerDataset.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/BatchNormalizationLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+constexpr AbsoluteTolerance<float> tolerance_f32(0.00001f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+#ifdef ARM_COMPUTE_ENABLE_FP16
+constexpr AbsoluteTolerance<float> tolerance_f16(0.01f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
+#endif                                                   /* ARM_COMPUTE_ENABLE_FP16 */
+constexpr AbsoluteTolerance<float> tolerance_qs8(3.0f);  /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS8 */
+constexpr AbsoluteTolerance<float> tolerance_qs16(6.0f); /**< Tolerance value for comparing reference's output against implementation's output for DataType::QS16 */
+} // namespace
+
+TEST_SUITE(NEON)
+TEST_SUITE(BatchNormalizationLayer)
+
+template <typename T>
+using NEBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::RandomBatchNormalizationLayerDataset(), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F32 })),
+               shape0, shape1, epsilon, dt)
+{
+    // Set the fixed point position for fixed point data types
+    int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
+
+    // Create tensors
+    Tensor src   = create_tensor<Tensor>(shape0, dt, 1, fixed_point_position);
+    Tensor dst   = create_tensor<Tensor>(shape0, dt, 1, fixed_point_position);
+    Tensor mean  = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
+    Tensor var   = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
+    Tensor beta  = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
+    Tensor gamma = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
+
+    // Create and configure function
+    NEBatchNormalizationLayer norm;
+    norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape0);
+    validate(dst.info()->valid_region(), valid_region);
+}
+
+TEST_SUITE(Float)
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                                                                                   framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f32, 0);
+}
+TEST_SUITE_END()
+
+#ifdef ARM_COMPUTE_ENABLE_FP16
+TEST_SUITE(Float16)
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                                                                                  framework::dataset::make("DataType", DataType::F16)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_f16, 0);
+}
+TEST_SUITE_END()
+#endif /* ARM_COMPUTE_ENABLE_FP16 */
+
+TEST_SUITE(Quantized)
+template <typename T>
+using NEBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;
+
+TEST_SUITE(QS8)
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                       framework::dataset::make("DataType", DataType::QS8)),
+                       framework::dataset::make("FractionalBits", 1, 6)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qs8, 0);
+}
+TEST_SUITE_END()
+
+TEST_SUITE(QS16)
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                       framework::dataset::make("DataType", DataType::QS16)),
+                       framework::dataset::make("FractionalBits", 1, 14)))
+{
+    // Validate output
+    validate(Accessor(_target), _reference, tolerance_qs16, 0);
+}
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+
+TEST_SUITE_END()
+TEST_SUITE_END()
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
new file mode 100644
index 0000000..f4772a8
--- /dev/null
+++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2017 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FIXTURE
+#define ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FIXTURE
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/AssetsLibrary.h"
+#include "tests/Globals.h"
+#include "tests/IAccessor.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/CPP/BatchNormalizationLayer.h"
+#include "tests/validation/Helpers.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class BatchNormalizationLayerValidationFixedPointFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape0, TensorShape shape1, float epsilon, DataType dt, int fractional_bits)
+    {
+        _fractional_bits = fractional_bits;
+        _data_type       = dt;
+        _target          = compute_target(shape0, shape1, epsilon, dt, fractional_bits);
+        _reference       = compute_reference(shape0, shape1, epsilon, dt, fractional_bits);
+    }
+
+protected:
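+    // Fill the target and reference tensors with the same pseudo-random data: fixed seeds
+    // (src: 0, mean: 1, var: 0, beta: 3, gamma: 4) keep the two paths in sync.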
+    template <typename U>
+    void fill(U &&src_tensor, U &&mean_tensor, U &&var_tensor, U &&beta_tensor, U &&gamma_tensor)
+    {
+        if(is_data_type_float(_data_type))
+        {
+            float min_bound = 0.f;
+            float max_bound = 0.f;
+            std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<T>();
+            std::uniform_real_distribution<> distribution(min_bound, max_bound);
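+            // Variance must be non-negative, so it is drawn from [0, max_bound] only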
+            std::uniform_real_distribution<> distribution_var(0, max_bound);
+            library->fill(src_tensor, distribution, 0);
+            library->fill(mean_tensor, distribution, 1);
+            library->fill(var_tensor, distribution_var, 0);
+            library->fill(beta_tensor, distribution, 3);
+            library->fill(gamma_tensor, distribution, 4);
+        }
+        else
+        {
+            int min_bound = 0;
+            int max_bound = 0;
+            std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<T>(_fractional_bits);
+            std::uniform_int_distribution<> distribution(min_bound, max_bound);
+            std::uniform_int_distribution<> distribution_var(0, max_bound);
+            library->fill(src_tensor, distribution, 0);
+            library->fill(mean_tensor, distribution, 1);
+            library->fill(var_tensor, distribution_var, 0);
+            library->fill(beta_tensor, distribution, 3);
+            library->fill(gamma_tensor, distribution, 4);
+        }
+    }
+
+    TensorType compute_target(const TensorShape &shape0, const TensorShape &shape1, float epsilon, DataType dt, int fixed_point_position)
+    {
+        // Create tensors
+        TensorType src   = create_tensor<TensorType>(shape0, dt, 1, fixed_point_position);
+        TensorType dst   = create_tensor<TensorType>(shape0, dt, 1, fixed_point_position);
+        TensorType mean  = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
+        TensorType var   = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
+        TensorType beta  = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
+        TensorType gamma = create_tensor<TensorType>(shape1, dt, 1, fixed_point_position);
+
+        // Create and configure function
+        FunctionType norm;
+        norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+
+        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(mean.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(var.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(beta.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        src.allocator()->allocate();
+        dst.allocator()->allocate();
+        mean.allocator()->allocate();
+        var.allocator()->allocate();
+        beta.allocator()->allocate();
+        gamma.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!mean.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!var.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!beta.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!gamma.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(src), AccessorType(mean), AccessorType(var), AccessorType(beta), AccessorType(gamma));
+
+        // Compute function
+        norm.run();
+
+        return dst;
+    }
+
+    SimpleTensor<T> compute_reference(const TensorShape &shape0, const TensorShape &shape1, float epsilon, DataType dt, int fixed_point_position)
+    {
+        // Create reference
+        SimpleTensor<T> ref_src{ shape0, dt, 1, fixed_point_position };
+        SimpleTensor<T> ref_mean{ shape1, dt, 1, fixed_point_position };
+        SimpleTensor<T> ref_var{ shape1, dt, 1, fixed_point_position };
+        SimpleTensor<T> ref_beta{ shape1, dt, 1, fixed_point_position };
+        SimpleTensor<T> ref_gamma{ shape1, dt, 1, fixed_point_position };
+
+        // Fill reference
+        fill(ref_src, ref_mean, ref_var, ref_beta, ref_gamma);
+
+        return reference::batch_normalization_layer(ref_src, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, fixed_point_position);
+    }
+
+    TensorType      _target{};
+    SimpleTensor<T> _reference{};
+    int             _fractional_bits{};
+    DataType        _data_type{};
+};
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class BatchNormalizationLayerValidationFixture : public BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape0, TensorShape shape1, float epsilon, DataType dt)
+    {
+        BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, dt, 0);
+    }
+};
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_BATCH_NORMALIZATION_LAYER_FIXTURE */
diff --git a/tests/validation_old/CL/BatchNormalizationLayer.cpp b/tests/validation_old/CL/BatchNormalizationLayer.cpp
deleted file mode 100644
index 75c9a58..0000000
--- a/tests/validation_old/CL/BatchNormalizationLayer.cpp
+++ /dev/null
@@ -1,227 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "CL/CLAccessor.h"
-#include "TypePrinter.h"
-#include "Utils.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/validation_old/Datasets.h"
-#include "tests/validation_old/Helpers.h"
-#include "tests/validation_old/Reference.h"
-#include "tests/validation_old/Validation.h"
-#include "tests/validation_old/dataset/BatchNormalizationLayerDataset.h"
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/Types.h"
-#include "arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h"
-#include "arm_compute/runtime/Tensor.h"
-#include "arm_compute/runtime/TensorAllocator.h"
-
-#include <random>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-using namespace arm_compute::test::validation;
-
-namespace
-{
-const float tolerance_f    = 1e-05; /**< Tolerance value for comparing reference's output against floating point implementation's output */
-const float tolerance_qs8  = 3;     /**< Tolerance value for comparing reference's output against quantized implementation's output */
-const float tolerance_qs16 = 6;     /**< Tolerance value for comparing reference's output against quantized implementation's output */
-
-/** Compute Neon batch normalization function.
- *
- * @param[in] shape     Shape of the input and output tensors.
- * @param[in] dt        Data type of input and output tensors.
- * @param[in] norm_info Normalization Layer information.
- *
- * @return Computed output tensor.
- */
-CLTensor compute_reference_batch_normalization_layer(const TensorShape &shape0, const TensorShape &shape1, DataType dt, float epsilon, int fixed_point_position = 0)
-{
-    // Create tensors
-    CLTensor src   = create_tensor<CLTensor>(shape0, dt, 1, fixed_point_position);
-    CLTensor dst   = create_tensor<CLTensor>(shape0, dt, 1, fixed_point_position);
-    CLTensor mean  = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
-    CLTensor var   = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
-    CLTensor beta  = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
-    CLTensor gamma = create_tensor<CLTensor>(shape1, dt, 1, fixed_point_position);
-
-    // Create and configure function
-    CLBatchNormalizationLayer norm;
-    norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
-
-    // Allocate tensors
-    src.allocator()->allocate();
-    dst.allocator()->allocate();
-    mean.allocator()->allocate();
-    var.allocator()->allocate();
-    beta.allocator()->allocate();
-    gamma.allocator()->allocate();
-
-    BOOST_TEST(!src.info()->is_resizable());
-    BOOST_TEST(!dst.info()->is_resizable());
-    BOOST_TEST(!mean.info()->is_resizable());
-    BOOST_TEST(!var.info()->is_resizable());
-    BOOST_TEST(!beta.info()->is_resizable());
-    BOOST_TEST(!gamma.info()->is_resizable());
-
-    // Fill tensors
-    if(dt == DataType::F32)
-    {
-        float min_bound = 0.f;
-        float max_bound = 0.f;
-        std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<float>();
-        std::uniform_real_distribution<> distribution(min_bound, max_bound);
-        std::uniform_real_distribution<> distribution_var(0, max_bound);
-        library->fill(CLAccessor(src), distribution, 0);
-        library->fill(CLAccessor(mean), distribution, 1);
-        library->fill(CLAccessor(var), distribution_var, 0);
-        library->fill(CLAccessor(beta), distribution, 3);
-        library->fill(CLAccessor(gamma), distribution, 4);
-    }
-    else
-    {
-        int min_bound = 0;
-        int max_bound = 0;
-        if(dt == DataType::QS8)
-        {
-            std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<int8_t>(fixed_point_position);
-        }
-        else
-        {
-            std::tie(min_bound, max_bound) = get_batchnormalization_layer_test_bounds<int16_t>(fixed_point_position);
-        }
-        std::uniform_int_distribution<> distribution(min_bound, max_bound);
-        std::uniform_int_distribution<> distribution_var(0, max_bound);
-        library->fill(CLAccessor(src), distribution, 0);
-        library->fill(CLAccessor(mean), distribution, 1);
-        library->fill(CLAccessor(var), distribution_var, 0);
-        library->fill(CLAccessor(beta), distribution, 3);
-        library->fill(CLAccessor(gamma), distribution, 4);
-    }
-
-    // Compute function
-    norm.run();
-
-    return dst;
-}
-} // namespace
-
-#ifndef DOXYGEN_SKIP_THIS
-BOOST_AUTO_TEST_SUITE(CL)
-BOOST_AUTO_TEST_SUITE(BatchNormalizationLayer)
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(Configuration, RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make({ DataType::QS8, DataType::QS16, DataType::F32 }), obj, dt)
-{
-    // Set fixed point position data type allowed
-    int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
-    // Create tensors
-    CLTensor src   = create_tensor<CLTensor>(obj.shape0, dt, 1, fixed_point_position);
-    CLTensor dst   = create_tensor<CLTensor>(obj.shape0, dt, 1, fixed_point_position);
-    CLTensor mean  = create_tensor<CLTensor>(obj.shape1, dt, 1, fixed_point_position);
-    CLTensor var   = create_tensor<CLTensor>(obj.shape1, dt, 1, fixed_point_position);
-    CLTensor beta  = create_tensor<CLTensor>(obj.shape1, dt, 1, fixed_point_position);
-    CLTensor gamma = create_tensor<CLTensor>(obj.shape1, dt, 1, fixed_point_position);
-
-    BOOST_TEST(src.info()->is_resizable());
-    BOOST_TEST(dst.info()->is_resizable());
-    BOOST_TEST(mean.info()->is_resizable());
-    BOOST_TEST(var.info()->is_resizable());
-    BOOST_TEST(beta.info()->is_resizable());
-    BOOST_TEST(gamma.info()->is_resizable());
-
-    // Create and configure function
-    CLBatchNormalizationLayer norm;
-    norm.configure(&src, &dst, &mean, &var, &beta, &gamma, obj.epsilon);
-
-    // Validate valid region
-    const ValidRegion valid_region     = shape_to_valid_region(obj.shape0);
-    const ValidRegion valid_region_vec = shape_to_valid_region(obj.shape1);
-    validate(src.info()->valid_region(), valid_region);
-    validate(dst.info()->valid_region(), valid_region);
-    validate(mean.info()->valid_region(), valid_region_vec);
-    validate(var.info()->valid_region(), valid_region_vec);
-    validate(beta.info()->valid_region(), valid_region_vec);
-    validate(gamma.info()->valid_region(), valid_region_vec);
-}
-
-BOOST_AUTO_TEST_SUITE(Float)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(Random,
-                     RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make(DataType::F32),
-                     obj, dt)
-{
-    // Compute function
-    CLTensor dst = compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon);
-
-    // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance_f, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(Quantized)
-
-BOOST_AUTO_TEST_SUITE(QS8)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(Random,
-                     RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make(DataType::QS8) * boost::unit_test::data::xrange(1, 6),
-                     obj, dt, fixed_point_position)
-{
-    // Compute function
-    CLTensor dst = compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
-
-    // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance_qs8, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(QS16)
-BOOST_DATA_TEST_CASE(Random,
-                     RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make(DataType::QS16) * boost::unit_test::data::xrange(1, 14),
-                     obj, dt, fixed_point_position)
-{
-    // Compute function
-    CLTensor dst = compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
-
-    // Validate output
-    validate(CLAccessor(dst), ref_dst, tolerance_qs16, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* DOXYGEN_SKIP_THIS */
diff --git a/tests/validation_old/NEON/BatchNormalizationLayer.cpp b/tests/validation_old/NEON/BatchNormalizationLayer.cpp
deleted file mode 100644
index d98f99a..0000000
--- a/tests/validation_old/NEON/BatchNormalizationLayer.cpp
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Copyright (c) 2017 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "NEON/Accessor.h"
-#include "TypePrinter.h"
-#include "tests/Globals.h"
-#include "tests/NEON/Helper.h"
-#include "tests/Utils.h"
-#include "tests/validation_old/Datasets.h"
-#include "tests/validation_old/Helpers.h"
-#include "tests/validation_old/Reference.h"
-#include "tests/validation_old/Validation.h"
-#include "tests/validation_old/dataset/BatchNormalizationLayerDataset.h"
-
-#include "arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h"
-
-#include <random>
-
-using namespace arm_compute;
-using namespace arm_compute::test;
-using namespace arm_compute::test::validation;
-
-namespace
-{
-const float tolerance_qs8  = 6;      /**< Tolerance value for comparing reference's output against quantized implementation's output */
-const float tolerance_qs16 = 6;      /**< Tolerance value for comparing reference's output against quantized implementation's output */
-const float tolerance_f32  = 1e-05f; /**< Tolerance value for comparing reference's output against floating point implementation's output */
-#ifdef ARM_COMPUTE_ENABLE_FP16
-const float tolerance_f16 = 0.01f; /**< Tolerance value for comparing reference's output against half precision floating point implementation's output */
-#endif                             /* ARM_COMPUTE_ENABLE_FP16 */
-
-/** Compute Neon batch normalization function.
- *
- * @param[in] shape     Shape of the input and output tensors.
- * @param[in] dt        Data type of input and output tensors.
- * @param[in] norm_info Normalization Layer information.
- *
- * @return Computed output tensor.
- */
-Tensor compute_reference_batch_normalization_layer(const TensorShape &shape0, const TensorShape &shape1, DataType dt, float epsilon, int fixed_point_position = 0)
-{
-    // Create tensors
-    Tensor src   = create_tensor<Tensor>(shape0, dt, 1, fixed_point_position);
-    Tensor dst   = create_tensor<Tensor>(shape0, dt, 1, fixed_point_position);
-    Tensor mean  = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
-    Tensor var   = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
-    Tensor beta  = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
-    Tensor gamma = create_tensor<Tensor>(shape1, dt, 1, fixed_point_position);
-
-    // Create and configure function
-    NEBatchNormalizationLayer norm;
-    norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
-
-    // Allocate tensors
-    src.allocator()->allocate();
-    dst.allocator()->allocate();
-    mean.allocator()->allocate();
-    var.allocator()->allocate();
-    beta.allocator()->allocate();
-    gamma.allocator()->allocate();
-
-    BOOST_TEST(!src.info()->is_resizable());
-    BOOST_TEST(!dst.info()->is_resizable());
-    BOOST_TEST(!mean.info()->is_resizable());
-    BOOST_TEST(!var.info()->is_resizable());
-    BOOST_TEST(!beta.info()->is_resizable());
-    BOOST_TEST(!gamma.info()->is_resizable());
-
-    // Fill tensors
-    switch(dt)
-    {
-        case DataType::QS8:
-        {
-            const std::pair<int8_t, int8_t> bounds = get_batchnormalization_layer_test_bounds<int8_t>(fixed_point_position);
-            std::uniform_int_distribution<> distribution(bounds.first, bounds.second);
-            std::uniform_int_distribution<> distribution_var(0, bounds.second);
-            test::fill_tensors(distribution, { 0, 1, 3, 4 }, &src, &mean, &beta, &gamma);
-            test::fill_tensors(distribution_var, { 0 }, &var);
-            break;
-        }
-        case DataType::QS16:
-        {
-            const std::pair<int16_t, int16_t> bounds = get_batchnormalization_layer_test_bounds<int16_t>(fixed_point_position);
-            std::uniform_int_distribution<> distribution(bounds.first, bounds.second);
-            std::uniform_int_distribution<> distribution_var(0, bounds.second);
-            test::fill_tensors(distribution, { 0, 1, 3, 4 }, &src, &mean, &beta, &gamma);
-            test::fill_tensors(distribution_var, { 0 }, &var);
-            break;
-        }
-#ifdef ARM_COMPUTE_ENABLE_FP16
-        case DataType::F16:
-        {
-            const std::pair<half_float::half, half_float::half> bounds = get_batchnormalization_layer_test_bounds<half_float::half>();
-            std::uniform_real_distribution<> distribution(bounds.first, bounds.second);
-            std::uniform_real_distribution<> distribution_var(0, bounds.second);
-            test::fill_tensors(distribution, { 0, 1, 3, 4 }, &src, &mean, &beta, &gamma);
-            test::fill_tensors(distribution_var, { 0 }, &var);
-            break;
-        }
-#endif /* ARM_COMPUTE_ENABLE_FP16 */
-        case DataType::F32:
-        {
-            const std::pair<float, float> bounds = get_batchnormalization_layer_test_bounds<float>();
-            std::uniform_real_distribution<> distribution(bounds.first, bounds.second);
-            std::uniform_real_distribution<> distribution_var(0, bounds.second);
-            test::fill_tensors(distribution, { 0, 1, 3, 4 }, &src, &mean, &beta, &gamma);
-            test::fill_tensors(distribution_var, { 0 }, &var);
-            break;
-        }
-        default:
-        {
-            ARM_COMPUTE_ERROR("Not supported");
-            break;
-        }
-    }
-
-    // Compute function
-    norm.run();
-
-    return dst;
-}
-} // namespace
-
-#ifndef DOXYGEN_SKIP_THIS
-BOOST_AUTO_TEST_SUITE(NEON)
-BOOST_AUTO_TEST_SUITE(BatchNormalizationLayer)
-
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit") * boost::unit_test::label("nightly"))
-BOOST_DATA_TEST_CASE(Configuration, RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make({ DataType::QS8, DataType::QS16, DataType::F32 }), obj, dt)
-{
-    // Set fixed point position data type allowed
-    int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
-
-    // Create tensors
-    Tensor src   = create_tensor<Tensor>(obj.shape0, dt, 1, fixed_point_position);
-    Tensor dst   = create_tensor<Tensor>(obj.shape0, dt, 1, fixed_point_position);
-    Tensor mean  = create_tensor<Tensor>(obj.shape1, dt, 1, fixed_point_position);
-    Tensor var   = create_tensor<Tensor>(obj.shape1, dt, 1, fixed_point_position);
-    Tensor beta  = create_tensor<Tensor>(obj.shape1, dt, 1, fixed_point_position);
-    Tensor gamma = create_tensor<Tensor>(obj.shape1, dt, 1, fixed_point_position);
-
-    BOOST_TEST(src.info()->is_resizable());
-    BOOST_TEST(dst.info()->is_resizable());
-    BOOST_TEST(mean.info()->is_resizable());
-    BOOST_TEST(var.info()->is_resizable());
-    BOOST_TEST(beta.info()->is_resizable());
-    BOOST_TEST(gamma.info()->is_resizable());
-
-    // Create and configure function
-    NEBatchNormalizationLayer norm;
-    norm.configure(&src, &dst, &mean, &var, &beta, &gamma, obj.epsilon);
-
-    // Validate valid region
-    const ValidRegion valid_region     = shape_to_valid_region(obj.shape0);
-    const ValidRegion valid_region_vec = shape_to_valid_region(obj.shape1);
-    validate(src.info()->valid_region(), valid_region);
-    validate(dst.info()->valid_region(), valid_region);
-    validate(mean.info()->valid_region(), valid_region_vec);
-    validate(var.info()->valid_region(), valid_region_vec);
-    validate(beta.info()->valid_region(), valid_region_vec);
-    validate(gamma.info()->valid_region(), valid_region_vec);
-}
-
-BOOST_AUTO_TEST_SUITE(Float)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(Random,
-                     RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make(DataType::F32),
-                     obj, dt)
-{
-    // Compute function
-    Tensor dst = compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_f32, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-#ifdef ARM_COMPUTE_ENABLE_FP16
-BOOST_AUTO_TEST_SUITE(Float16)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(Random,
-                     RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make(DataType::F16),
-                     obj, dt)
-{
-    // Compute function
-    Tensor dst = compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_f16, 0);
-}
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* ARM_COMPUTE_ENABLE_FP16 */
-
-BOOST_AUTO_TEST_SUITE(Quantized)
-BOOST_AUTO_TEST_SUITE(QS8)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(Random,
-                     RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make(DataType::QS8) * boost::unit_test::data::xrange(1, 6),
-                     obj, dt, fixed_point_position)
-{
-    // Compute function
-    Tensor dst = compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_qs8);
-}
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE(QS16)
-BOOST_TEST_DECORATOR(*boost::unit_test::label("precommit"))
-BOOST_DATA_TEST_CASE(Random,
-                     RandomBatchNormalizationLayerDataset() * boost::unit_test::data::make(DataType::QS16) * boost::unit_test::data::xrange(1, 14),
-                     obj, dt, fixed_point_position)
-{
-    // Compute function
-    Tensor dst = compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
-
-    // Compute reference
-    RawTensor ref_dst = Reference::compute_reference_batch_normalization_layer(obj.shape0, obj.shape1, dt, obj.epsilon, fixed_point_position);
-
-    // Validate output
-    validate(Accessor(dst), ref_dst, tolerance_qs16);
-}
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-
-BOOST_AUTO_TEST_SUITE_END()
-BOOST_AUTO_TEST_SUITE_END()
-#endif /* DOXYGEN_SKIP_THIS */
diff --git a/tests/validation_old/Reference.cpp b/tests/validation_old/Reference.cpp
index 6a52cd0..fc54846 100644
--- a/tests/validation_old/Reference.cpp
+++ b/tests/validation_old/Reference.cpp
@@ -284,68 +284,6 @@
     return ref_dst;
 }
 
-RawTensor Reference::compute_reference_batch_normalization_layer(const TensorShape &shape0, const TensorShape &shape1, DataType dt, float epsilon, int fixed_point_position)
-{
-    // Create reference
-    RawTensor ref_src(shape0, dt, 1, fixed_point_position);
-    RawTensor ref_dst(shape0, dt, 1, fixed_point_position);
-    RawTensor ref_mean(shape1, dt, 1, fixed_point_position);
-    RawTensor ref_var(shape1, dt, 1, fixed_point_position);
-    RawTensor ref_beta(shape1, dt, 1, fixed_point_position);
-    RawTensor ref_gamma(shape1, dt, 1, fixed_point_position);
-
-    // Fill tensors
-    switch(dt)
-    {
-        case DataType::QS8:
-        {
-            const std::pair<int8_t, int8_t> bounds = get_batchnormalization_layer_test_bounds<int8_t>(fixed_point_position);
-            std::uniform_int_distribution<> distribution(bounds.first, bounds.second);
-            std::uniform_int_distribution<> distribution_var(0, bounds.second);
-            fill_tensors(distribution, { 0, 1, 3, 4 }, &ref_src, &ref_mean, &ref_beta, &ref_gamma);
-            fill_tensors(distribution_var, { 0 }, &ref_var);
-            break;
-        }
-        case DataType::QS16:
-        {
-            const std::pair<int16_t, int16_t> bounds = get_batchnormalization_layer_test_bounds<int16_t>(fixed_point_position);
-            std::uniform_int_distribution<> distribution(bounds.first, bounds.second);
-            std::uniform_int_distribution<> distribution_var(0, bounds.second);
-            fill_tensors(distribution, { 0, 1, 3, 4 }, &ref_src, &ref_mean, &ref_beta, &ref_gamma);
-            fill_tensors(distribution_var, { 0 }, &ref_var);
-            break;
-        }
-        case DataType::F16:
-        {
-            const std::pair<half_float::half, half_float::half> bounds = get_batchnormalization_layer_test_bounds<half_float::half>();
-            std::uniform_real_distribution<> distribution(bounds.first, bounds.second);
-            std::uniform_real_distribution<> distribution_var(0, bounds.second);
-            fill_tensors(distribution, { 0, 1, 3, 4 }, &ref_src, &ref_mean, &ref_beta, &ref_gamma);
-            fill_tensors(distribution_var, { 0 }, &ref_var);
-            break;
-        }
-        case DataType::F32:
-        {
-            const std::pair<float, float> bounds = get_batchnormalization_layer_test_bounds<float>();
-            std::uniform_real_distribution<> distribution(bounds.first, bounds.second);
-            std::uniform_real_distribution<> distribution_var(0, bounds.second);
-            fill_tensors(distribution, { 0, 1, 3, 4 }, &ref_src, &ref_mean, &ref_beta, &ref_gamma);
-            fill_tensors(distribution_var, { 0 }, &ref_var);
-            break;
-        }
-        default:
-        {
-            ARM_COMPUTE_ERROR("Not supported");
-            break;
-        }
-    }
-
-    // Compute reference
-    ReferenceCPP::batch_normalization_layer(ref_src, ref_dst, ref_mean, ref_var, ref_beta, ref_gamma, epsilon, fixed_point_position);
-
-    return ref_dst;
-}
-
 RawTensor Reference::compute_reference_roi_pooling_layer(const TensorShape &shape, DataType dt, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info)
 {
     TensorShape shape_dst;
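Note (not part of the patch): in the removed reference above, src, mean, beta and gamma are drawn from the symmetric bounds returned by get_batchnormalization_layer_test_bounds, while the variance tensor uses a second distribution bounded below by zero. This keeps var + epsilon non-negative, so the sqrt/inv_sqrt in the reference computation stays well defined for every data type.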
diff --git a/tests/validation_old/Reference.h b/tests/validation_old/Reference.h
index 9c7baac..e363bb2 100644
--- a/tests/validation_old/Reference.h
+++ b/tests/validation_old/Reference.h
@@ -204,17 +204,6 @@
     static RawTensor compute_reference_warp_perspective(const TensorShape &shape, RawTensor &valid_mask, const float *matrix, InterpolationPolicy policy, BorderMode border_mode,
                                                         uint8_t constant_border_value);
 
-    /** Compute reference batch normalization layer.
-     *
-     * @param[in] shape0               Shape of the input and output tensors.
-     * @param[in] shape1               Shape of the vector tensors.
-     * @param[in] dt                   Data type of all input and output tensors.
-     * @param[in] epsilon              Small value to avoid division by zero.
-     * @param[in] fixed_point_position Fixed point position.
-     *
-     * @return Computed raw tensor.
-     */
-    static RawTensor compute_reference_batch_normalization_layer(const TensorShape &shape0, const TensorShape &shape1, DataType dt, float epsilon, int fixed_point_position = 0);
     /** Compute reference roi pooling layer.
      *
      * @param[in] shape     Shape of the input tensor.
diff --git a/tests/validation_old/ReferenceCPP.cpp b/tests/validation_old/ReferenceCPP.cpp
index 86dc589..eae892a 100644
--- a/tests/validation_old/ReferenceCPP.cpp
+++ b/tests/validation_old/ReferenceCPP.cpp
@@ -212,19 +212,6 @@
     tensor_operations::warp_perspective(s, d, vmask, matrix, policy, border_mode, constant_border_value);
 }
 
-// Batch Normalization Layer
-void ReferenceCPP::batch_normalization_layer(const RawTensor &src, RawTensor &dst, const RawTensor &mean, const RawTensor &var, const RawTensor &beta, const RawTensor &gamma, float epsilon,
-                                             int fixed_point_position)
-{
-    const TensorVariant s = TensorFactory::get_tensor(src);
-    TensorVariant       d = TensorFactory::get_tensor(dst);
-    const TensorVariant m = TensorFactory::get_tensor(mean);
-    const TensorVariant v = TensorFactory::get_tensor(var);
-    const TensorVariant b = TensorFactory::get_tensor(beta);
-    const TensorVariant g = TensorFactory::get_tensor(gamma);
-    boost::apply_visitor(tensor_visitors::batch_normalization_layer_visitor(s, m, v, b, g, epsilon, fixed_point_position), d);
-}
-
 // ROI Pooling Layer
 void ReferenceCPP::roi_pooling_layer(const RawTensor &src, RawTensor &dst, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info)
 {
diff --git a/tests/validation_old/ReferenceCPP.h b/tests/validation_old/ReferenceCPP.h
index 5bc10a5..2f02afc 100644
--- a/tests/validation_old/ReferenceCPP.h
+++ b/tests/validation_old/ReferenceCPP.h
@@ -198,20 +198,6 @@
      * @param[in]  constant_border_value Constant value to use for borders if border_mode is set to CONSTANT.
      */
     static void warp_perspective(const RawTensor &src, RawTensor &dst, RawTensor &valid_mask, const float *matrix, InterpolationPolicy policy, BorderMode border_mode, uint8_t constant_border_value);
-
-    /** Batch Normalization of @p src based on the information from @p norm_info.
-     *
-     * @param[in]  src                  Input tensor.
-     * @param[out] dst                  Result tensor.
-     * @param[in]  mean                 Mean vector tensor.
-     * @param[in]  var                  Variance vector tensor.
-     * @param[in]  beta                 Beta vector tensor.
-     * @param[in]  gamma                Gamma vector tensor.
-     * @param[in]  epsilon              Small value to avoid division by zero.
-     * @param[in]  fixed_point_position Fixed point position.
-     */
-    static void batch_normalization_layer(const RawTensor &src, RawTensor &dst, const RawTensor &mean, const RawTensor &var, const RawTensor &beta, const RawTensor &gamma, float epsilon,
-                                          int fixed_point_position = 0);
     /** ROI Pooling layer of @p src based on the information from @p pool_info and @p rois.
      *
      * @param[in]  src       Input tensor.
diff --git a/tests/validation_old/TensorOperations.h b/tests/validation_old/TensorOperations.h
index 0c1ab41..04a79f0 100644
--- a/tests/validation_old/TensorOperations.h
+++ b/tests/validation_old/TensorOperations.h
@@ -861,70 +861,6 @@
     }
 }
 
-// Batch Normalization Layer for fixed point type
-template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type * = nullptr>
-void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
-{
-    const int cols       = static_cast<int>(in.shape()[0]);
-    const int rows       = static_cast<int>(in.shape()[1]);
-    const int depth      = static_cast<int>(in.shape()[2]);
-    int       upper_dims = in.shape().total_size() / (cols * rows * depth);
-
-    for(int r = 0; r < upper_dims; ++r)
-    {
-        for(int i = 0; i < depth; ++i)
-        {
-            for(int k = 0; k < rows; ++k)
-            {
-                for(int l = 0; l < cols; ++l)
-                {
-                    const int                              pos = l + k * cols + i * rows * cols + r * cols * rows * depth;
-                    fixed_point_arithmetic::fixed_point<T> in_qs(in[pos], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> var_qs(var[i], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> mean_qs(mean[i], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> beta_qs(beta[i], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> gamma_qs(gamma[i], fixed_point_position, true);
-                    fixed_point_arithmetic::fixed_point<T> epsilon_qs(epsilon, fixed_point_position);
-
-                    auto denominator = fixed_point_arithmetic::inv_sqrt(var_qs + epsilon_qs);
-                    auto numerator   = in_qs - mean_qs;
-                    auto x_bar       = numerator * denominator;
-                    x_bar            = beta_qs + x_bar * gamma_qs;
-                    out[pos]         = x_bar.raw();
-                }
-            }
-        }
-    }
-}
-
-// Batch Normalization Layer for floating point type
-template <typename T, typename std::enable_if<is_floating_point<T>::value, int>::type * = nullptr>
-void batch_normalization_layer(const Tensor<T> &in, Tensor<T> &out, const Tensor<T> &mean, const Tensor<T> &var, const Tensor<T> &beta, const Tensor<T> &gamma, float epsilon, int fixed_point_position)
-{
-    const int cols       = static_cast<int>(in.shape()[0]);
-    const int rows       = static_cast<int>(in.shape()[1]);
-    const int depth      = static_cast<int>(in.shape()[2]);
-    int       upper_dims = in.shape().total_size() / (cols * rows * depth);
-
-    for(int r = 0; r < upper_dims; ++r)
-    {
-        for(int i = 0; i < depth; ++i)
-        {
-            for(int k = 0; k < rows; ++k)
-            {
-                for(int l = 0; l < cols; ++l)
-                {
-                    const int   pos         = l + k * cols + i * rows * cols + r * cols * rows * depth;
-                    const float denominator = sqrt(var[i] + epsilon);
-                    const float numerator   = in[pos] - mean[i];
-                    const float x_bar       = numerator / denominator;
-                    out[pos]                = beta[i] + x_bar * gamma[i];
-                }
-            }
-        }
-    }
-}
-
 // ROI Pooling layer
 template <typename T>
 void roi_pooling_layer(const Tensor<T> &in, Tensor<T> &out, const std::vector<ROI> &rois, const ROIPoolingLayerInfo &pool_info)
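Note (not part of the patch): both removed implementations evaluate the same per-element transform; only the arithmetic differs, the fixed point path replacing the division by a multiplication with fixed_point_arithmetic::inv_sqrt. Per element x at column l, row k, channel i and batch r, with mu_i = mean[i] and sigma_i^2 = var[i]:

    y_{l,k,i,r} = \beta_i + \gamma_i \cdot \frac{x_{l,k,i,r} - \mu_i}{\sqrt{\sigma_i^2 + \epsilon}}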
diff --git a/tests/validation_old/TensorVisitors.h b/tests/validation_old/TensorVisitors.h
index dafbfe0..8af035b 100644
--- a/tests/validation_old/TensorVisitors.h
+++ b/tests/validation_old/TensorVisitors.h
@@ -128,33 +128,6 @@
     RoundingPolicy       _rounding_policy;
 };
 
-// Batch Normalization Layer visitor
-struct batch_normalization_layer_visitor : public boost::static_visitor<>
-{
-public:
-    explicit batch_normalization_layer_visitor(const TensorVariant &in, const TensorVariant &mean, const TensorVariant &var, const TensorVariant &beta, const TensorVariant &gamma, float epsilon,
-                                               int fixed_point_position = 0)
-        : _in(in), _mean(mean), _var(var), _beta(beta), _gamma(gamma), _epsilon(epsilon), _fixed_point_position(fixed_point_position)
-    {
-    }
-
-    template <typename T>
-    void operator()(Tensor<T> &out) const
-    {
-        const Tensor<T> &in    = boost::get<Tensor<T>>(_in);
-        const Tensor<T> &mean  = boost::get<Tensor<T>>(_mean);
-        const Tensor<T> &var   = boost::get<Tensor<T>>(_var);
-        const Tensor<T> &beta  = boost::get<Tensor<T>>(_beta);
-        const Tensor<T> &gamma = boost::get<Tensor<T>>(_gamma);
-        tensor_operations::batch_normalization_layer(in, out, mean, var, beta, gamma, _epsilon, _fixed_point_position);
-    }
-
-private:
-    const TensorVariant &_in, &_mean, &_var, &_beta, &_gamma;
-    float                _epsilon;
-    int                  _fixed_point_position;
-};
-
 // ROI Pooling layer
 struct roi_pooling_layer_visitor : public boost::static_visitor<>
 {
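Note (not part of the patch): the removed batch_normalization_layer_visitor relied on the boost::static_visitor idiom used throughout validation_old: the variant holds one concrete Tensor<T>, and boost::apply_visitor forwards it to the matching templated operator(), which then calls the typed tensor_operations implementation. A minimal, self-contained sketch of that idiom follows; SimpleTensor, SimpleVariant and scale_visitor are hypothetical names used only for illustration and are not library types.

#include <boost/variant.hpp>

#include <cstdint>
#include <vector>

// Stand-in for the framework's Tensor<T>; only here to keep the sketch self-contained.
template <typename T>
using SimpleTensor = std::vector<T>;

// Variant over a subset of the element types the old reference supported.
using SimpleVariant = boost::variant<SimpleTensor<int8_t>, SimpleTensor<int16_t>, SimpleTensor<float>>;

// Visitor with a templated operator(): boost::apply_visitor selects the
// instantiation matching the type currently held by the variant.
struct scale_visitor : public boost::static_visitor<>
{
    explicit scale_visitor(float factor)
        : _factor(factor)
    {
    }

    template <typename T>
    void operator()(SimpleTensor<T> &out) const
    {
        for(auto &v : out)
        {
            v = static_cast<T>(v * _factor);
        }
    }

private:
    float _factor;
};

int main()
{
    SimpleVariant t = SimpleTensor<float>{ 1.f, 2.f, 3.f };
    boost::apply_visitor(scale_visitor(0.5f), t); // dispatches to operator()(SimpleTensor<float> &)
    return 0;
}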