Implicit padding testing along the X axis for high-priority operators

Add artificial implicit padding testing for the following fixtures:
- Scale
- FullyConnected
- Pooling
- DepthwiseConvolution
- DirectConvolution
- Winograd
- FFT
- GEMM/GEMMLowp

Create a utility function that loops through a list of tensors and adds random padding based on the global seed (only for the NHWC data layout). A usage sketch is shown below.
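
For illustration, a minimal sketch of how a fixture is expected to call the helper just before allocation (tensor names and shapes are placeholders; the actual call sites are in the fixture changes below):

    // Create tensors (illustrative names)
    TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, QuantizationInfo(), data_layout);
    TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, QuantizationInfo(), data_layout);

    // Extend the X-axis padding of each listed tensor with 1-16 random
    // elements per side, derived from the global test seed.
    // The helper is a no-op for data layouts other than NHWC.
    add_padding_x({ &src, &dst }, data_layout);

    // Allocate tensors; the extended padding is honoured by the allocation
    src.allocator()->allocate();
    dst.allocator()->allocate();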

Remove GEMMLowpAssemblyFixture since it was unused.
Remove some unused AssetsLibrary.h includes.

Resolve COMPMID-4161

Change-Id: Ib6f4f7f113ae69b993d7b2a9e04abbf3de8c99fe
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5327
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index 7ff2ab6..b53d46f 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -325,6 +325,30 @@
     return std::pair<int, int> { min_bound, max_bound };
 }
 
+void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout)
+{
+    if(data_layout == DataLayout::NHWC)
+    {
+        constexpr unsigned int lower = 1U;
+        constexpr unsigned int upper = 16U;
+
+        std::uniform_int_distribution<unsigned int> distribution(lower, upper);
+        size_t                                      seed_offset = 0;
+
+        for(ITensor *tensor : tensors)
+        {
+            ARM_COMPUTE_ERROR_ON(!tensor->info()->is_resizable());
+
+            std::mt19937 gen(library->seed() + seed_offset++);
+
+            const unsigned int right = distribution(gen);
+            const unsigned int left  = distribution(gen);
+
+            tensor->info()->extend_padding(PaddingSize(0U, right, 0U, left));
+        }
+    }
+}
+
 template void get_tile(const SimpleTensor<float> &in, SimpleTensor<float> &roi, const Coordinates &coord);
 template void get_tile(const SimpleTensor<half> &in, SimpleTensor<half> &roi, const Coordinates &coord);
 template void get_tile(const SimpleTensor<int> &in, SimpleTensor<int> &roi, const Coordinates &coord);
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 30ec14e..3ba3bd1 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -229,6 +229,15 @@
  * @param[in] channel_id Channel id for per channel quantization info.
  */
 std::pair<int, int> get_symm_quantized_per_channel_bounds(const QuantizationInfo &quant_info, float min, float max, size_t channel_id = 0);
+
+/** Add random padding along the X axis (between 1 and 16 columns per side) to all the input tensors
+ *
+ * @param[in] tensors     List of tensors to add padding to
+ * @param[in] data_layout (Optional) Data layout of the operator
+ *
+ * @note This function adds padding to the input tensors only if data_layout == DataLayout::NHWC
+ */
+void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout = DataLayout::NHWC);
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/NEON/GEMMLowp.cpp b/tests/validation/NEON/GEMMLowp.cpp
index 518f480..9d075e1 100644
--- a/tests/validation/NEON/GEMMLowp.cpp
+++ b/tests/validation/NEON/GEMMLowp.cpp
@@ -37,7 +37,6 @@
 #include "tests/framework/Macros.h"
 #include "tests/framework/datasets/Datasets.h"
 #include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/GEMMLowpAssemblyFixture.h"
 #include "tests/validation/fixtures/GEMMLowpFixture.h"
 
 namespace arm_compute
diff --git a/tests/validation/UNIT/GPUTarget.cpp b/tests/validation/UNIT/GPUTarget.cpp
index e1b7e1f..d2c81cf 100644
--- a/tests/validation/UNIT/GPUTarget.cpp
+++ b/tests/validation/UNIT/GPUTarget.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,7 +22,6 @@
  * SOFTWARE.
  */
 #include "arm_compute/core/GPUTarget.h"
-#include "tests/AssetsLibrary.h"
 #include "tests/Globals.h"
 #include "tests/Utils.h"
 #include "tests/framework/Asserts.h"
diff --git a/tests/validation/UNIT/SafeIntegerOps.cpp b/tests/validation/UNIT/SafeIntegerOps.cpp
index 62f7041..13e4ef5 100644
--- a/tests/validation/UNIT/SafeIntegerOps.cpp
+++ b/tests/validation/UNIT/SafeIntegerOps.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,7 +23,6 @@
  */
 #include "arm_compute/core/GPUTarget.h"
 #include "arm_compute/core/utils/math/SafeOps.h"
-#include "tests/AssetsLibrary.h"
 #include "tests/Globals.h"
 #include "tests/Utils.h"
 #include "tests/framework/Asserts.h"
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 07790e8..b649280 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -69,7 +69,8 @@
 public:
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
-               DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info, bool mixed_layout = false)
+               DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info,
+               bool mixed_layout = false)
     {
         _mixed_layout             = mixed_layout;
         _data_type                = data_type;
@@ -87,7 +88,6 @@
     }
 
 protected:
-
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         // Test Multi DataLayout graph cases, when the data layout changes after configure
@@ -214,6 +214,8 @@
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &src, &weights, &bias, &dst }, _data_layout);
+
         // Allocate tensors
         src.allocator()->allocate();
         weights.allocator()->allocate();
diff --git a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
index 0aa43d8..e87e31f 100644
--- a/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DepthwiseConvolutionLayerFixture.h
@@ -116,6 +116,9 @@
 
     void allocate_and_run_target()
     {
+        // TODO: uncomment after COMPMID-4361
+        // add_padding_x({ &_src, &_weights, &_biases, &_target }, _data_layout);
+
         // Allocate tensors
         _src.allocator()->allocate();
         _weights.allocator()->allocate();
@@ -131,7 +134,7 @@
         fill(AccessorType(_src), 0);
         fill(AccessorType(_weights), 1);
         fill(AccessorType(_biases), 2);
-        
+
         if(_mixed_layout)
         {
             mix_layout(_dwc, _src, _target);
@@ -158,7 +161,6 @@
     }
 
 protected:
-
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         // Test Multi DataLayout graph cases, when the data layout changes after configure
@@ -237,7 +239,7 @@
     ActivationLayerInfo _act_info{};
     unsigned int        _depth_multiplier{};
     Size2D              _dilation{};
-    bool                _mixed_layout{false};
+    bool                _mixed_layout{ false };
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
@@ -309,6 +311,8 @@
 
     void allocate_and_run_target()
     {
+        add_padding_x({ &_src, &_weights, &_biases, &_target }, _data_layout);
+
         // Allocate tensors
         _src.allocator()->allocate();
         _weights.allocator()->allocate();
@@ -442,6 +446,8 @@
 
     void allocate_and_run_target()
     {
+        add_padding_x({ &_src, &_weights, &_biases, &_target }, _data_layout);
+
         // Allocate tensors
         _src.allocator()->allocate();
         _weights.allocator()->allocate();
diff --git a/tests/validation/fixtures/DirectConvolutionLayerFixture.h b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
index 5ed0b9f..b79991e 100644
--- a/tests/validation/fixtures/DirectConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/DirectConvolutionLayerFixture.h
@@ -90,7 +90,6 @@
     }
 
 protected:
-    
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         DataLayout data_layout = src.info()->data_layout();
@@ -172,6 +171,9 @@
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        // TODO: uncomment after COMPMID-4341
+        // add_padding_x({ &src, &weights, &bias, &dst }, data_layout);
+
         // Allocate tensors
         src.allocator()->allocate();
         weights.allocator()->allocate();
@@ -221,7 +223,7 @@
     SimpleTensor<T>  _reference{};
     QuantizationInfo _quantization_info{};
     DataType         _data_type{};
-    bool             _mixed_layout {false};
+    bool             _mixed_layout{ false };
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h
index 199730d..3a75135 100644
--- a/tests/validation/fixtures/FFTFixture.h
+++ b/tests/validation/fixtures/FFTFixture.h
@@ -91,6 +91,9 @@
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        // TODO: uncomment after COMPMID-4362
+        // add_padding_x({ &src, &dst });
+
         // Allocate tensors
         src.allocator()->allocate();
         dst.allocator()->allocate();
@@ -137,15 +140,14 @@
                DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false)
     {
         _mixed_layout = mixed_layout;
-        _data_type   = data_type;
-        _data_layout = data_layout;
+        _data_type    = data_type;
+        _data_layout  = data_layout;
 
         _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
         _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
     }
 
 protected:
-    
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         // Test Multi DataLayout graph cases, when the data layout changes after configure
@@ -210,6 +212,8 @@
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &src, &weights, &bias, &dst }, _data_layout);
+
         // Allocate tensors
         src.allocator()->allocate();
         weights.allocator()->allocate();
@@ -225,7 +229,7 @@
         fill(AccessorType(src), 0);
         fill(AccessorType(weights), 1);
         fill(AccessorType(bias), 2);
-        
+
         if(_mixed_layout)
         {
             mix_layout(conv, src, dst);
@@ -261,7 +265,7 @@
     SimpleTensor<T> _reference{};
     DataType        _data_type{};
     DataLayout      _data_layout{};
-    bool            _mixed_layout{false};
+    bool            _mixed_layout{ false };
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
diff --git a/tests/validation/fixtures/FullyConnectedLayerFixture.h b/tests/validation/fixtures/FullyConnectedLayerFixture.h
index 8f38aae..383d880 100644
--- a/tests/validation/fixtures/FullyConnectedLayerFixture.h
+++ b/tests/validation/fixtures/FullyConnectedLayerFixture.h
@@ -72,7 +72,6 @@
     }
 
 protected:
-
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         const DataLayout data_layout = src.info()->data_layout();
@@ -165,6 +164,8 @@
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &src, &weights, &bias, &dst });
+
         // Allocate tensors
         src.allocator()->allocate();
         weights.allocator()->allocate();
@@ -238,7 +239,7 @@
     SimpleTensor<T>     _reference{};
     DataType            _data_type{};
     DataType            _bias_data_type{};
-    bool                _mixed_layout{false};
+    bool                _mixed_layout{ false };
     QuantizationInfo    _quantization_info{};
     ActivationLayerInfo _activation_info{};
 };
diff --git a/tests/validation/fixtures/GEMMFixture.h b/tests/validation/fixtures/GEMMFixture.h
index 500e094..45516d4 100644
--- a/tests/validation/fixtures/GEMMFixture.h
+++ b/tests/validation/fixtures/GEMMFixture.h
@@ -105,6 +105,8 @@
         ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &a, &b, &c, &dst });
+
         // Allocate tensors
         a.allocator()->allocate();
         b.allocator()->allocate();
@@ -231,6 +233,8 @@
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &lhs, &rhs, &bias, &dst });
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -347,6 +351,8 @@
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &lhs, &rhs, &bias, &dst });
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -483,6 +489,12 @@
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        // TODO: remove if statement after COMPMID-4368
+        if(!rhs_info.export_to_cl_image)
+        {
+            add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst });
+        }
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -625,6 +637,12 @@
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        // TODO: remove if statement after COMPMID-4368
+        if(!rhs_info.export_to_cl_image)
+        {
+            add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst });
+        }
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -787,6 +805,12 @@
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        // TODO: remove if statement after COMPMID-4368
+        if(!rhs_info.export_to_cl_image)
+        {
+            add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst });
+        }
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -955,6 +979,12 @@
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        // TODO: remove if statement after COMPMID-4368
+        if(!rhs_info.export_to_cl_image)
+        {
+            add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &bias, &dst });
+        }
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -1118,6 +1148,12 @@
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        // TODO: remove if statement after COMPMID-4368
+        if(!rhs_info.export_to_cl_image)
+        {
+            add_padding_x({ &lhs, &rhs, &rhs_reshaped, &bias, &dst });
+        }
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -1277,6 +1313,12 @@
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        // TODO: remove if statement after COMPMID-4368
+        if(!rhs_info.export_to_cl_image)
+        {
+            add_padding_x({ &lhs, &rhs, &rhs_reshaped, &bias, &dst });
+        }
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -1410,6 +1452,8 @@
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &lhs, &rhs, &bias, &dst });
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -1539,6 +1583,8 @@
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &lhs, &rhs, &bias, &dst });
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
diff --git a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h b/tests/validation/fixtures/GEMMLowpAssemblyFixture.h
deleted file mode 100644
index e9ec1bc..0000000
--- a/tests/validation/fixtures/GEMMLowpAssemblyFixture.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2017-2019 Arm Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_GEMMLOWP_ASSEMBLY_FIXTURE
-#define ARM_COMPUTE_TEST_GEMMLOWP_ASSEMBLY_FIXTURE
-
-#include "arm_compute/core/TensorShape.h"
-#include "arm_compute/core/Types.h"
-#include "tests/AssetsLibrary.h"
-#include "tests/Globals.h"
-#include "tests/IAccessor.h"
-#include "tests/framework/Asserts.h"
-#include "tests/framework/Fixture.h"
-#include "tests/validation/Helpers.h"
-#include "tests/validation/reference/GEMMLowp.h"
-
-#include <random>
-
-namespace arm_compute
-{
-namespace test
-{
-namespace validation
-{
-template <typename TensorType, typename AccessorType, typename FunctionType, typename T2>
-class GEMMLowpAssemblyFixture : public framework::Fixture
-{
-public:
-    template <typename...>
-    void setup(size_t m, size_t n, size_t k)
-    {
-        const TensorShape shape_a(k, m);
-        const TensorShape shape_b(n, k);
-        const TensorShape shape_c(n, m);
-        _target    = compute_target(shape_a, shape_b, shape_c);
-        _reference = compute_reference(shape_a, shape_b, shape_c);
-    }
-
-protected:
-    template <typename U>
-    void fill(U &&tensor, int i, int lo, int hi)
-    {
-        std::uniform_int_distribution<> distribution(lo, hi);
-        library->fill(tensor, distribution, i);
-    }
-
-    TensorType compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
-    {
-        DataType dt_in = std::is_same<T2, int8_t>::value ? DataType::S8 : DataType::U8;
-
-        // Create tensors
-        TensorType a = create_tensor<TensorType>(shape_a, dt_in, 1);
-        TensorType b = create_tensor<TensorType>(shape_b, dt_in, 1);
-        TensorType c = create_tensor<TensorType>(shape_c, DataType::S32, 1);
-
-        // Create and configure function
-        FunctionType gemmlowp;
-        gemmlowp.configure(&a, &b, nullptr, &c);
-
-        ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(c.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-        // Allocate tensors
-        a.allocator()->allocate();
-        b.allocator()->allocate();
-        c.allocator()->allocate();
-
-        ARM_COMPUTE_EXPECT(!a.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(!b.info()->is_resizable(), framework::LogLevel::ERRORS);
-        ARM_COMPUTE_EXPECT(!c.info()->is_resizable(), framework::LogLevel::ERRORS);
-
-        // Fill tensors
-        if(dt_in == DataType::S8)
-        {
-            fill(AccessorType(a), 0, -128, 127);
-            fill(AccessorType(b), 1, -128, 127);
-        }
-        else
-        {
-            fill(AccessorType(a), 0, 0, 255);
-            fill(AccessorType(b), 1, 0, 255);
-        }
-        fill(AccessorType(c), 2, 0, 0);
-
-        // Compute GEMM function
-        gemmlowp.run();
-        return c;
-    }
-
-    SimpleTensor<int32_t> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &shape_c)
-    {
-        DataType dt = std::is_same<T2, int8_t>::value ? DataType::S8 : DataType::U8;
-
-        // Create reference
-        SimpleTensor<T2> a{ shape_a, dt, 1 };
-        SimpleTensor<T2> b{ shape_b, dt, 1 };
-
-        // Fill reference
-        if(dt == DataType::S8)
-        {
-            fill(a, 0, -128, 127);
-            fill(b, 1, -128, 127);
-        }
-        else
-        {
-            fill(a, 0, 0, 255);
-            fill(b, 1, 0, 255);
-        }
-
-        return reference::gemmlowp<int32_t, T2>(a, b, shape_c);
-    }
-
-    TensorType            _target{};
-    SimpleTensor<int32_t> _reference{};
-};
-
-} // namespace validation
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_GEMMLOWP_FIXTURE */
diff --git a/tests/validation/fixtures/GEMMLowpFixture.h b/tests/validation/fixtures/GEMMLowpFixture.h
index 95f4960..c3da2e2 100644
--- a/tests/validation/fixtures/GEMMLowpFixture.h
+++ b/tests/validation/fixtures/GEMMLowpFixture.h
@@ -133,6 +133,8 @@
     ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(output.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+    add_padding_x({ &a, &b, &output });
+
     // Allocate tensors
     a.allocator()->allocate();
     b.allocator()->allocate();
@@ -948,6 +950,8 @@
         ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &dst });
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -1098,6 +1102,8 @@
         ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &lhs, &rhs, &lhs_reshaped, &rhs_reshaped, &dst });
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -1247,6 +1253,8 @@
         ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &lhs, &rhs, &rhs_reshaped, &dst });
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -1390,6 +1398,8 @@
         ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &lhs, &rhs, &rhs_reshaped, &dst });
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -1502,6 +1512,8 @@
         ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &lhs, &rhs, &dst });
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
@@ -1597,6 +1609,8 @@
         ARM_COMPUTE_EXPECT(lhs.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(rhs.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &lhs, &rhs, &dst });
+
         // Allocate tensors
         lhs.allocator()->allocate();
         rhs.allocator()->allocate();
diff --git a/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h b/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h
index d085509..4ee493b 100644
--- a/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h
+++ b/tests/validation/fixtures/GEMMReshapeLHSMatrixFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -91,6 +91,8 @@
 
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &src, &dst });
+
         // Allocate tensors
         src.allocator()->allocate();
         dst.allocator()->allocate();
diff --git a/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h b/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h
index 99bfa3b..3f73912 100644
--- a/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h
+++ b/tests/validation/fixtures/GEMMReshapeRHSMatrixFixture.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -90,6 +90,8 @@
 
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &src, &dst });
+
         // Allocate tensors
         src.allocator()->allocate();
         dst.allocator()->allocate();
diff --git a/tests/validation/fixtures/PoolingLayerFixture.h b/tests/validation/fixtures/PoolingLayerFixture.h
index ee81ff5..66e09d5 100644
--- a/tests/validation/fixtures/PoolingLayerFixture.h
+++ b/tests/validation/fixtures/PoolingLayerFixture.h
@@ -50,13 +50,12 @@
                QuantizationInfo input_qinfo = QuantizationInfo(), QuantizationInfo output_qinfo = QuantizationInfo(), bool mixed_layout = false)
     {
         _mixed_layout = mixed_layout;
-        _pool_info = pool_info;
-        _target    = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
-        _reference = compute_reference(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
+        _pool_info    = pool_info;
+        _target       = compute_target(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
+        _reference    = compute_reference(shape, pool_info, data_type, data_layout, input_qinfo, output_qinfo, indices);
     }
 
 protected:
-
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         const DataLayout data_layout = src.info()->data_layout();
@@ -115,6 +114,9 @@
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(_target_indices.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        // TODO: uncomment after COMPMID-4363
+        // add_padding_x({ &src, &dst, &_target_indices }, data_layout);
+
         // Allocate tensors
         src.allocator()->allocate();
         dst.allocator()->allocate();
@@ -152,7 +154,7 @@
     TensorType             _target{};
     SimpleTensor<T>        _reference{};
     PoolingLayerInfo       _pool_info{};
-    bool                   _mixed_layout{false};
+    bool                   _mixed_layout{ false };
     TensorType             _target_indices{};
     SimpleTensor<uint32_t> _ref_indices{};
 };
diff --git a/tests/validation/fixtures/ScaleFixture.h b/tests/validation/fixtures/ScaleFixture.h
index 9e0f620..a40cfda 100644
--- a/tests/validation/fixtures/ScaleFixture.h
+++ b/tests/validation/fixtures/ScaleFixture.h
@@ -68,7 +68,6 @@
     }
 
 protected:
-
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         const DataLayout data_layout = src.info()->data_layout();
@@ -163,6 +162,8 @@
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &src, &dst }, data_layout);
+
         // Allocate tensors
         src.allocator()->allocate();
         dst.allocator()->allocate();
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index f956963..a1433e9 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -62,12 +62,11 @@
     {
         ARM_COMPUTE_UNUSED(dilation);
         _mixed_layout = mixed_layout;
-        _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
-        _reference = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
+        _target       = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, data_type, act_info, data_layout);
+        _reference    = compute_reference(input_shape, weights_shape, bias_shape, info, data_type, act_info);
     }
 
 protected:
-
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         const DataLayout data_layout = src.info()->data_layout();
@@ -134,6 +133,8 @@
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &src, &weights, &bias, &dst }, data_layout);
+
         // Allocate tensors
         src.allocator()->allocate();
         weights.allocator()->allocate();
@@ -235,7 +236,7 @@
 
     TensorType      _target{};
     SimpleTensor<T> _reference{};
-    bool            _mixed_layout{false};
+    bool            _mixed_layout{ false };
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>
@@ -246,13 +247,12 @@
     void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
     {
         TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
-        _mixed_layout = mixed_layout;
-        _target    = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
-        _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
+        _mixed_layout            = mixed_layout;
+        _target                  = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
+        _reference               = compute_reference(input_shape, output_shape, winograd_info, data_type);
     }
 
 protected:
-
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         const DataLayout data_layout_src = src.info()->data_layout();
@@ -311,6 +311,8 @@
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &src, &dst }, data_layout);
+
         // Allocate tensors
         src.allocator()->allocate();
         dst.allocator()->allocate();
@@ -344,7 +346,7 @@
         return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
     }
 
-    bool _mixed_layout {false};
+    bool            _mixed_layout{ false };
     TensorType      _target{};
     SimpleTensor<T> _reference{};
 };
@@ -360,12 +362,11 @@
         TensorShape  output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
 
         _mixed_layout = mixed_layout;
-        _target    = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
-        _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
+        _target       = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
+        _reference    = compute_reference(input_shape, output_shape, winograd_info, data_type);
     }
 
 protected:
-
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         const DataLayout data_layout_src = src.info()->data_layout();
@@ -425,6 +426,8 @@
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &src, &dst }, data_layout);
+
         // Allocate tensors
         src.allocator()->allocate();
         dst.allocator()->allocate();
@@ -458,7 +461,7 @@
         return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
     }
 
-    bool    _mixed_layout {false};
+    bool            _mixed_layout{ false };
     TensorType      _target{};
     SimpleTensor<T> _reference{};
 };
@@ -475,7 +478,6 @@
     }
 
 protected:
-
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         const DataLayout data_layout_src = src.info()->data_layout();
@@ -534,6 +536,8 @@
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &src, &bias, &dst }, winograd_info.output_data_layout);
+
         // Allocate tensors
         src.allocator()->allocate();
         bias.allocator()->allocate();
@@ -577,7 +581,7 @@
         return (act_info.enabled()) ? reference::activation_layer<T>(winograd_output, act_info) : winograd_output;
     }
 
-    bool    _mixed_layout {false};
+    bool            _mixed_layout{ false };
     TensorType      _target{};
     SimpleTensor<T> _reference{};
 };