Implicit padding testing along the X axis on high-priority operators

Add artificial implicit padding testing for the following fixtures:
- Scale
- FullyConnected
- Pooling
- DepthwiseConvolution
- DirectConvolution
- Winograd
- FFT
- GEMM/GEMMLowp

Create a utility function that loops through a list of tensors and adds random padding based on the global seed (only for the NHWC data layout). A hedged sketch of what such a helper could look like is given below.
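
Illustrative sketch only, not the exact library implementation: it assumes the
arm_compute types ITensor, ITensorInfo::extend_padding() and PaddingSize, and
takes the seed as a parameter (in the fixtures it would come from the global
test seed). The function name and signature mirror the add_padding_x() call
visible in the diff below.

    #include "arm_compute/core/ITensor.h"
    #include "arm_compute/core/Types.h"

    #include <initializer_list>
    #include <random>

    void add_padding_x(std::initializer_list<arm_compute::ITensor *> tensors,
                       const arm_compute::DataLayout &data_layout,
                       uint32_t                       seed)
    {
        // Artificial X padding is only meaningful for NHWC, where X is the innermost dimension
        if(data_layout != arm_compute::DataLayout::NHWC)
        {
            return;
        }

        std::mt19937                            gen(seed);
        std::uniform_int_distribution<uint32_t> dist(1, 16);

        for(arm_compute::ITensor *tensor : tensors)
        {
            // Extend left/right (X) padding of each tensor by a random amount
            const uint32_t left  = dist(gen);
            const uint32_t right = dist(gen);
            tensor->info()->extend_padding(arm_compute::PaddingSize(0 /* top */, right, 0 /* bottom */, left));
        }
    }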

Remove GEMMLowpAssemblyFixture since it wasn't used.
Remove some AssetsLibrary headers since they weren't used.

Resolve COMPMID-4161

Change-Id: Ib6f4f7f113ae69b993d7b2a9e04abbf3de8c99fe
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5327
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/tests/validation/fixtures/ConvolutionLayerFixture.h b/tests/validation/fixtures/ConvolutionLayerFixture.h
index 07790e8..b649280 100644
--- a/tests/validation/fixtures/ConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/ConvolutionLayerFixture.h
@@ -69,7 +69,8 @@
 public:
     template <typename...>
     void setup(TensorShape input_shape, TensorShape weights_shape, TensorShape bias_shape, TensorShape output_shape, PadStrideInfo info, Size2D dilation, bool reshape_weights,
-               DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info, bool mixed_layout = false)
+               DataType data_type, DataType weights_data_type, DataLayout data_layout, QuantizationInfo quantization_info, QuantizationInfo weight_quantization_info, ActivationLayerInfo act_info,
+               bool mixed_layout = false)
     {
         _mixed_layout             = mixed_layout;
         _data_type                = data_type;
@@ -87,7 +88,6 @@
     }
 
 protected:
-
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         // Test Multi DataLayout graph cases, when the data layout changes after configure
@@ -214,6 +214,8 @@
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &src, &weights, &bias, &dst }, _data_layout);
+
         // Allocate tensors
         src.allocator()->allocate();
         weights.allocator()->allocate();