Implicit padding testing along the X axis on high-priority operators

Add artificial implicit padding testing for the following fixtures:
- Scale
- FullyConnected
- Pooling
- DepthwiseConvolution
- DirectConvolution
- Winograd
- FFT
- GEMM/GEMMLowp

Create a utility function that loops through a list of tensors and adds random padding based on the global seed (NHWC data layout only); see the sketch below.
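
A minimal sketch of what such a helper could look like (the padding range, the
per-tensor seed offset and the header paths are illustrative assumptions, not
necessarily the actual implementation):

    #include "arm_compute/core/ITensor.h"
    #include "arm_compute/core/Types.h"
    #include "tests/Globals.h"

    #include <initializer_list>
    #include <random>

    namespace arm_compute
    {
    namespace test
    {
    namespace validation
    {
    // Extend each tensor's padding along the X axis by a random amount derived
    // from the global test seed, so kernels are exercised with implicit
    // (non-zero) padding. Only applied for NHWC, where X is the innermost,
    // contiguous dimension.
    inline void add_padding_x(std::initializer_list<ITensor *> tensors, const DataLayout &data_layout = DataLayout::NHWC)
    {
        if(data_layout != DataLayout::NHWC)
        {
            return;
        }

        // Illustrative bounds for the random padding amount.
        std::uniform_int_distribution<unsigned int> distribution(1U, 16U);
        size_t                                      seed_offset = 0;

        for(ITensor *tensor : tensors)
        {
            // Per-tensor generator seeded from the global seed (assumed to be
            // exposed by the AssetsLibrary instance), so runs are reproducible.
            std::mt19937 gen(library->seed() + seed_offset++);

            const unsigned int right = distribution(gen);
            const unsigned int left  = distribution(gen);

            // PaddingSize is (top, right, bottom, left): pad only along X.
            tensor->info()->extend_padding(PaddingSize(0U, right, 0U, left));
        }
    }
    } // namespace validation
    } // namespace test
    } // namespace arm_compute

Fixtures call it right before tensor allocation, e.g.
add_padding_x({ &src, &weights, &bias, &dst }, _data_layout), as shown in the
diff below.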

Remove GEMMLowpAssemblyFixture since it wasn't used
Remove some AssetsLibrary headers since they weren't used

Resolve COMPMID-4161

Change-Id: Ib6f4f7f113ae69b993d7b2a9e04abbf3de8c99fe
Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5327
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/tests/validation/fixtures/FFTFixture.h b/tests/validation/fixtures/FFTFixture.h
index 199730d..3a75135 100644
--- a/tests/validation/fixtures/FFTFixture.h
+++ b/tests/validation/fixtures/FFTFixture.h
@@ -91,6 +91,9 @@
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        // TODO: uncomment after COMPMID-4362
+        // add_padding_x({ &src, &dst });
+
         // Allocate tensors
         src.allocator()->allocate();
         dst.allocator()->allocate();
@@ -137,15 +140,14 @@
                DataType data_type, DataLayout data_layout, ActivationLayerInfo act_info, bool mixed_layout = false)
     {
         _mixed_layout = mixed_layout;
-        _data_type   = data_type;
-        _data_layout = data_layout;
+        _data_type    = data_type;
+        _data_layout  = data_layout;
 
         _target    = compute_target(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
         _reference = compute_reference(input_shape, weights_shape, bias_shape, output_shape, info, dilation, act_info);
     }
 
 protected:
-    
     void mix_layout(FunctionType &layer, TensorType &src, TensorType &dst)
     {
         // Test Multi DataLayout graph cases, when the data layout changes after configure
@@ -210,6 +212,8 @@
         ARM_COMPUTE_EXPECT(bias.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
 
+        add_padding_x({ &src, &weights, &bias, &dst }, _data_layout);
+
         // Allocate tensors
         src.allocator()->allocate();
         weights.allocator()->allocate();
@@ -225,7 +229,7 @@
         fill(AccessorType(src), 0);
         fill(AccessorType(weights), 1);
         fill(AccessorType(bias), 2);
-        
+
         if(_mixed_layout)
         {
             mix_layout(conv, src, dst);
@@ -261,7 +265,7 @@
     SimpleTensor<T> _reference{};
     DataType        _data_type{};
     DataLayout      _data_layout{};
-    bool            _mixed_layout{false};
+    bool            _mixed_layout{ false };
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T, bool mixed_layout = false>