COMPMID-1201 - Implementing Winograd Convolution Layer 1x3 and 3x1 kernels on OpenCL

Change-Id: I39667bab49daa4da009694163274a59fd3574c73
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/137595
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index b869f4c..f68ec8c 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -23,6 +23,7 @@
  */
 #include "arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h"
 #include "arm_compute/core/CL/kernels/CLWinogradOutputTransformKernel.h"
+#include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
@@ -51,12 +52,66 @@
 {
 namespace
 {
+// *INDENT-OFF*
+// clang-format off
 constexpr AbsoluteTolerance<float> tolerance_f32(0.001f);
 constexpr AbsoluteTolerance<float> tolerance_convolution_layer_f32(0.1f);
-const auto                         SmallWinogradInputTransformDataset = framework::dataset::concat(datasets::SmallWinogradInputTransformDataset2x2_3x3(),
-                                                                                                   framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x4_3x3(), datasets::SmallWinogradInputTransformDataset4x4_5x5()));
-const auto LargeWinogradInputTransformDataset = framework::dataset::concat(datasets::LargeWinogradInputTransformDataset2x2_3x3(),
-                                                                           framework::dataset::concat(datasets::LargeWinogradInputTransformDataset4x4_3x3(), datasets::LargeWinogradInputTransformDataset4x4_5x5()));
+
+// Input transform
+const auto SmallWinogradInputTransformDatasetNCHW =
+           framework::dataset::concat(datasets::SmallWinogradInputTransformDataset2x2_3x3(),
+           framework::dataset::concat(datasets::SmallWinogradInputTransformDataset2x1_3x1(),
+           framework::dataset::concat(datasets::SmallWinogradInputTransformDataset1x2_1x3(),
+           framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x4_3x3(),
+           framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x1_3x1(),
+           framework::dataset::concat(datasets::SmallWinogradInputTransformDataset1x4_1x3(),
+                                      datasets::SmallWinogradInputTransformDataset4x4_5x5()))))));
+
+const auto SmallWinogradInputTransformDatasetNHWC = framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x4_3x3(),
+                                                                               datasets::SmallWinogradInputTransformDataset4x4_5x5());
+
+const auto LargeWinogradInputTransformDatasetNCHW =
+           framework::dataset::concat(datasets::LargeWinogradInputTransformDataset2x2_3x3(),
+           framework::dataset::concat(datasets::LargeWinogradInputTransformDataset2x1_3x1(),
+           framework::dataset::concat(datasets::LargeWinogradInputTransformDataset1x2_1x3(),
+           framework::dataset::concat(datasets::LargeWinogradInputTransformDataset4x4_3x3(),
+           framework::dataset::concat(datasets::LargeWinogradInputTransformDataset4x1_3x1(),
+           framework::dataset::concat(datasets::LargeWinogradInputTransformDataset1x4_1x3(),
+                                      datasets::LargeWinogradInputTransformDataset4x4_5x5()))))));
+
+const auto LargeWinogradInputTransformDatasetNHWC =
+           framework::dataset::concat(datasets::LargeWinogradInputTransformDataset4x4_3x3(),
+                                      datasets::LargeWinogradInputTransformDataset4x4_5x5());
+
+// Filter transform
+const auto SmallWinogradFilterTransformDatasetNCHW =
+           framework::dataset::concat(combine(datasets::Small3x3Shapes(), framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+           framework::dataset::concat(combine(datasets::Small3x1Shapes(), framework::dataset::make("OutputTile", { Size2D(2U, 1U), Size2D(4U, 1U) })),
+           framework::dataset::concat(combine(datasets::Small1x3Shapes(), framework::dataset::make("OutputTile", { Size2D(1U, 2U), Size2D(1U, 4U) })),
+                                      combine(datasets::Small5x5Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 4U) })))));
+
+const auto SmallWinogradFilterTransformDatasetNHWC =
+           framework::dataset::concat(combine(datasets::Small3x3Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 4U) })),
+                                      combine(datasets::Small5x5Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 4U) })));
+
+const auto LargeWinogradFilterTransformDatasetNCHW =
+           framework::dataset::concat(combine(datasets::Large3x3Shapes(), framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+           framework::dataset::concat(combine(datasets::Large3x1Shapes(), framework::dataset::make("OutputTile", { Size2D(2U, 1U), Size2D(4U, 1U) })),
+           framework::dataset::concat(combine(datasets::Large1x3Shapes(), framework::dataset::make("OutputTile", { Size2D(1U, 2U), Size2D(1U, 4U) })),
+                                      combine(datasets::Large5x5Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 4U) })))));
+
+const auto LargeWinogradFilterTransformDatasetNHWC =
+           framework::dataset::concat(combine(datasets::Large3x3Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 4U) })),
+                                      combine(datasets::Large5x5Shapes(), framework::dataset::make("OutputTile", { Size2D(4U, 4U) })));
+
+// Output transform
+const auto SmallWinogradOutputTransformDatasetNCHW = datasets::SmallWinogradOutputTransformDatasetNCHW();
+
+const auto SmallWinogradOutputTransformDatasetNHWC = datasets::SmallWinogradOutputTransformDatasetNHWC();
+
+const auto LargeWinogradOutputTransformDatasetNCHW = datasets::LargeWinogradOutputTransformDatasetNCHW();
+
+const auto LargeWinogradOutputTransformDatasetNHWC = datasets::LargeWinogradOutputTransformDatasetNHWC();
 } // namespace
 
 using namespace arm_compute::misc::shape_calculator;
@@ -65,9 +120,6 @@
 TEST_SUITE(Winograd)
 
 TEST_SUITE(InputTransform)
-
-// *INDENT-OFF*
-// clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                 framework::dataset::make("InputInfo",{
                                                                                         TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::F16),     // F16 not supported
@@ -101,17 +153,20 @@
 {
     ARM_COMPUTE_EXPECT(bool(CLWinogradInputTransform::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
 }
-// clang-format on
-// *INDENT-ON*
 
 using CLWinogradInputTransformFixture = WinogradInputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradInputTransform, float>;
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(SmallWinogradInputTransformDataset, LargeWinogradInputTransformDataset),
+TEST_SUITE(NCHW)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(SmallWinogradInputTransformDatasetNCHW,
+                                                                                                      LargeWinogradInputTransformDatasetNCHW),
                                                                            framework::dataset::make("DataLayout", { DataLayout::NCHW })),
-                                                                   framework::dataset::make("DataType", { DataType::F32 })),
+                                                                           framework::dataset::make("DataType", { DataType::F32 })),
                shape_in, winograd_info, data_layout, data_type)
 {
-    TensorShape shape_out = compute_winograd_input_transform_shape(TensorInfo(shape_in, 1, data_type), winograd_info);
+    TensorInfo  tensor_info_in(shape_in, 1, data_type);
+    tensor_info_in.set_data_layout(data_layout);
+
+    TensorShape shape_out = compute_winograd_input_transform_shape(tensor_info_in, winograd_info);
 
     // Create tensors
     CLTensor in  = create_tensor<CLTensor>(shape_in, data_type, 1, 0, QuantizationInfo(), data_layout);
@@ -127,28 +182,70 @@
     winograd_input_transform.configure(&in, &out, winograd_info);
 }
 
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixture, framework::DatasetMode::PRECOMMIT, combine(framework::dataset::concat(combine(SmallWinogradInputTransformDataset,
-                                                                                                             framework::dataset::make("DataLayout", { DataLayout::NCHW })),
-                                                                                                             combine(framework::dataset::concat(datasets::SmallWinogradInputTransformDataset4x4_3x3(), datasets::SmallWinogradInputTransformDataset4x4_5x5()),
-                                                                                                                     framework::dataset::make("DataLayout", { DataLayout::NHWC }))),
-                                                                                                             framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixture, framework::DatasetMode::PRECOMMIT, combine(combine(SmallWinogradInputTransformDatasetNCHW,
+                                                                                                                     framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+                                                                                                                     framework::dataset::make("DataType", { DataType::F32 })))
 {
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
 
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixture, framework::DatasetMode::NIGHTLY, combine(framework::dataset::concat(combine(LargeWinogradInputTransformDataset,
-                                                                                                           framework::dataset::make("DataLayout", { DataLayout::NCHW })),
-                                                                                                           combine(framework::dataset::concat(datasets::LargeWinogradInputTransformDataset4x4_3x3(), datasets::LargeWinogradInputTransformDataset4x4_5x5()),
-                                                                                                                   framework::dataset::make("DataLayout", { DataLayout::NHWC }))),
-                                                                                                           framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixture, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradInputTransformDatasetNCHW,
+                                                                                                                   framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+                                                                                                                   framework::dataset::make("DataType", { DataType::F32 })))
 {
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
+TEST_SUITE_END() // NCHW
+
+TEST_SUITE(NHWC)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(SmallWinogradInputTransformDatasetNHWC,
+                                                                                                      LargeWinogradInputTransformDatasetNHWC),
+                                                                           framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+                                                                           framework::dataset::make("DataType", { DataType::F32 })),
+               shape_in, winograd_info, data_layout, data_type)
+{
+    TensorShape shape_in_nhwc(shape_in);
+
+    // Convert the shape to NHWC
+    permute(shape_in_nhwc, PermutationVector(2U, 0U, 1U));
+
+    // TensorInfo
+    TensorInfo  tensor_info_in(shape_in_nhwc, 1, data_type);
+    tensor_info_in.set_data_layout(data_layout);
+
+    TensorShape shape_out = compute_winograd_input_transform_shape(tensor_info_in, winograd_info);
+
+    // Create tensors
+    CLTensor in  = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, 0, QuantizationInfo(), data_layout);
+    CLTensor out = create_tensor<CLTensor>(shape_out, data_type);
+
+    ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(out.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLWinogradInputTransform winograd_input_transform;
+
+    // Configure the function
+    winograd_input_transform.configure(&in, &out, winograd_info);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixture, framework::DatasetMode::PRECOMMIT, combine(combine(SmallWinogradInputTransformDatasetNHWC,
+                                                                                                                     framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+                                                                                                                     framework::dataset::make("DataType", { DataType::F32 })))
+{
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixture, framework::DatasetMode::NIGHTLY, combine(combine(LargeWinogradInputTransformDatasetNHWC,
+                                                                                                                   framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+                                                                                                                   framework::dataset::make("DataType", { DataType::F32 })))
+{
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // NHWC
 TEST_SUITE_END() // InputTransform
 
 TEST_SUITE(FilterTransform)
-// *INDENT-OFF*
-// clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                 framework::dataset::make("InputInfo",{
                                                                                         TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::F16),     // F16 not supported
@@ -182,19 +279,19 @@
 {
     ARM_COMPUTE_EXPECT(bool(CLWinogradFilterTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
 }
-// clang-format on
-// *INDENT-ON*
 
 using CLWinogradFilterTransform        = CLSynthetizeFunctionWithZeroConstantBorder<CLWinogradFilterTransformKernel, 0>;
 using CLWinogradFilterTransformFixture = WinogradFilterTransformValidationFixture<CLTensor, CLAccessor, CLWinogradFilterTransform, float>;
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::Small3x3Shapes(), datasets::Large3x3Shapes()),
-                                                                                   framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
-                                                                           framework::dataset::make("DataLayout", { DataLayout::NCHW })),
-                                                                   framework::dataset::make("DataType", { DataType::F32 })),
+TEST_SUITE(NCHW)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL,
+               combine(combine(framework::dataset::concat(SmallWinogradFilterTransformDatasetNCHW,
+                                                          LargeWinogradFilterTransformDatasetNCHW),
+                                                          framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+                                                          framework::dataset::make("DataType", { DataType::F32 })),
                shape_a, output_tile, data_layout, data_type)
 {
-    WinogradInfo winograd_info(output_tile, Size2D(shape_a[0], shape_a[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
+    WinogradInfo winograd_info(output_tile, Size2D(shape_a[0], shape_a[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, data_layout /* Not needed */);
 
     TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type), winograd_info);
 
@@ -210,37 +307,79 @@
     winograd_filter_transform.configure(&a, &b, winograd_info);
 }
 
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::ALL,
-                       combine(framework::dataset::concat(combine(framework::dataset::concat(framework::dataset::concat(combine(datasets::Small3x3Shapes(), framework::dataset::make("OutputTile", Size2D(2U, 2U))),
-                                                                                                                        combine(datasets::Small3x3Shapes(),
-                                                                                                                                framework::dataset::make("OutputTile", Size2D(4U, 4U)))),
-                                                                                             combine(datasets::Small5x5Shapes(), framework::dataset::make("OutputTile", Size2D(4U, 4U)))),
-                                                                  framework::dataset::make("DataLayout", { DataLayout::NCHW })),
-                                                          combine(combine(framework::dataset::concat(datasets::Small3x3Shapes(), datasets::Small5x5Shapes()), framework::dataset::make("OutputTile", Size2D(4U, 4U))), framework::dataset::make("DataLayout", { DataLayout::NHWC }))),
-                               framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(SmallWinogradFilterTransformDatasetNCHW,
+                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+                                       framework::dataset::make("DataType", { DataType::F32 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixture, framework::DatasetMode::NIGHTLY,
-                       combine(framework::dataset::concat(combine(framework::dataset::concat(framework::dataset::concat(combine(datasets::Large3x3Shapes(), framework::dataset::make("OutputTile", Size2D(2U, 2U))),
-                                                                                                                        combine(datasets::Large3x3Shapes(),
-                                                                                                                                framework::dataset::make("OutputTile", Size2D(4U, 4U)))),
-                                                                                             combine(datasets::Large5x5Shapes(), framework::dataset::make("OutputTile", Size2D(4U, 4U)))),
-                                                                  framework::dataset::make("DataLayout", { DataLayout::NCHW })),
-                                                          combine(combine(framework::dataset::concat(datasets::Large3x3Shapes(), datasets::Large5x5Shapes()), framework::dataset::make("OutputTile", Size2D(4U, 4U))), framework::dataset::make("DataLayout", { DataLayout::NHWC }))),
-                               framework::dataset::make("DataType", { DataType::F32 })))
+                       combine(combine(LargeWinogradFilterTransformDatasetNCHW,
+                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+                                       framework::dataset::make("DataType", { DataType::F32 })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // NCHW
+
+TEST_SUITE(NHWC)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL,
+               combine(combine(framework::dataset::concat(SmallWinogradFilterTransformDatasetNHWC,
+                                                          LargeWinogradFilterTransformDatasetNHWC),
+                                                          framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+                                                          framework::dataset::make("DataType", { DataType::F32 })),
+               shape_in, output_tile, data_layout, data_type)
+{
+    TensorShape shape_in_nhwc(shape_in);
+
+    // Convert the shape to NHWC
+    permute(shape_in_nhwc, PermutationVector(2U, 0U, 1U));
+
+    // TensorInfo
+    TensorInfo  tensor_info_in(shape_in_nhwc, 1, data_type);
+    tensor_info_in.set_data_layout(data_layout);
+
+    WinogradInfo winograd_info(output_tile, Size2D(shape_in[0], shape_in[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, data_layout /* Not needed */);
+
+    TensorShape shape_b = compute_winograd_filter_transform_shape(tensor_info_in, winograd_info);
+
+    // Create tensors
+    CLTensor a = create_tensor<CLTensor>(shape_in_nhwc, data_type, 1, 0, QuantizationInfo(), data_layout);
+    CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), data_layout);
+
+    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLWinogradFilterTransform winograd_filter_transform;
+    winograd_filter_transform.configure(&a, &b, winograd_info);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(SmallWinogradFilterTransformDatasetNHWC,
+                                       framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+                                       framework::dataset::make("DataType", { DataType::F32 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
 
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixture, framework::DatasetMode::NIGHTLY,
+                       combine(combine(LargeWinogradFilterTransformDatasetNHWC,
+                                       framework::dataset::make("DataLayout", { DataLayout::NHWC })),
+                                       framework::dataset::make("DataType", { DataType::F32 })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // NHWC
 TEST_SUITE_END() // FilterTransform
 
 TEST_SUITE(OutputTransform)
-// *INDENT-OFF*
-// clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
                                                 framework::dataset::make("InputInfo",{
                                                                                         TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F16),      // F16 not supported
@@ -291,14 +430,14 @@
 {
     ARM_COMPUTE_EXPECT(bool(CLWinogradOutputTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
 }
-// clang-format on
-// *INDENT-ON*
 
 using CLWinogradOutputTransform        = CLSynthetizeFunctionWithZeroConstantBorder<CLWinogradOutputTransformKernel, 0>;
 using CLWinogradOutputTransformFixture = WinogradOutputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradOutputTransform, float>;
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallWinogradOutputTransformDataset(), datasets::LargeWinogradOutputTransformDataset()),
-                                                                   framework::dataset::make("DataType", { DataType::F32 })),
+TEST_SUITE(NCHW)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(SmallWinogradOutputTransformDatasetNCHW,
+                                                                                              LargeWinogradOutputTransformDatasetNCHW),
+                                                                                              framework::dataset::make("DataType", { DataType::F32 })),
                shape_a, winograd_info, data_type)
 {
     TensorShape shape_b = compute_winograd_output_transform_shape(TensorInfo(shape_a, 1, data_type), winograd_info);
@@ -315,23 +454,62 @@
     winograd_output_transform.configure(&a, nullptr, &b, winograd_info);
 }
 
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixture, framework::DatasetMode::ALL, combine(datasets::SmallWinogradOutputTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixture, framework::DatasetMode::ALL,
+                       combine(SmallWinogradOutputTransformDatasetNCHW,
+                               framework::dataset::make("DataType", { DataType::F32 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
 
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradOutputTransformFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradOutputTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradOutputTransformFixture, framework::DatasetMode::NIGHTLY,
+                       combine(LargeWinogradOutputTransformDatasetNCHW,
+                               framework::dataset::make("DataType", { DataType::F32 })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // NCHW
+
+TEST_SUITE(NHWC)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(SmallWinogradOutputTransformDatasetNHWC,
+                                                                                              LargeWinogradOutputTransformDatasetNHWC),
+                                                                                              framework::dataset::make("DataType", { DataType::F32 })),
+               shape_a, winograd_info, data_type)
+{
+    TensorShape shape_b = compute_winograd_output_transform_shape(TensorInfo(shape_a, 1, data_type), winograd_info);
+
+    // Create tensors
+    CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
+    CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);
+
+    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLWinogradOutputTransform winograd_output_transform;
+    winograd_output_transform.configure(&a, nullptr, &b, winograd_info);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixture, framework::DatasetMode::ALL,
+                       combine(SmallWinogradOutputTransformDatasetNHWC,
+                               framework::dataset::make("DataType", { DataType::F32 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
 
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradOutputTransformFixture, framework::DatasetMode::NIGHTLY,
+                       combine(LargeWinogradOutputTransformDatasetNHWC,
+                               framework::dataset::make("DataType", { DataType::F32 })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // NHWC
 TEST_SUITE_END() // OutputTransform
 
 TEST_SUITE(ConvolutionLayer)
-// *INDENT-OFF*
-// clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(
                                                 framework::dataset::make("InputInfo", {
                                                                                         TensorInfo(TensorShape(17U, 31U, 2U), 1, DataType::F16),     // FP16 not supported
@@ -373,16 +551,14 @@
 {
     ARM_COMPUTE_EXPECT(bool(CLWinogradConvolutionLayer::validate(&input_info.clone()->set_is_resizable(false), &weights_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info)) == expected, framework::LogLevel::ERRORS);
 }
-// clang-format on
-// *INDENT-ON*
 
 using CLWinogradConvolutionLayerFastMathFixture = WinogradConvolutionLayerFastMathValidationFixture<CLTensor, CLAccessor, CLWinogradConvolutionLayer, float>;
 TEST_SUITE(Conv3x3)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::PRECOMMIT,
                        combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x3Dataset(),
                                                framework::dataset::make("DataType", { DataType::F32 })),
-                                       framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })),
-                               framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+                                               framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })),
+                                               framework::dataset::make("DataLayout", { DataLayout::NCHW })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32);
@@ -391,20 +567,64 @@
 FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::NIGHTLY,
                        combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x3Dataset(),
                                                framework::dataset::make("DataType", { DataType::F32 })),
-                                       framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })),
-                               framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+                                               framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })),
+                                               framework::dataset::make("DataLayout", { DataLayout::NCHW })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32);
 }
 TEST_SUITE_END() // Conv3x3
 
+TEST_SUITE(Conv3x1)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer3x1Dataset(),
+                                       framework::dataset::make("DataType", { DataType::F32 })),
+                                       framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })),
+                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer3x1Dataset(),
+                                       framework::dataset::make("DataType", { DataType::F32 })),
+                                       framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })),
+                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32);
+}
+TEST_SUITE_END() // Conv3x1
+
+TEST_SUITE(Conv1x3)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(datasets::SmallWinogradConvolutionLayer1x3Dataset(),
+                                       framework::dataset::make("DataType", { DataType::F32 })),
+                                       framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })),
+                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(datasets::LargeWinogradConvolutionLayer1x3Dataset(),
+                                       framework::dataset::make("DataType", { DataType::F32 })),
+                                       framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })),
+                                       framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_convolution_layer_f32);
+}
+TEST_SUITE_END() // Conv1x3
+
 TEST_SUITE(Conv5x5)
 FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::PRECOMMIT,
                        combine(combine(combine(datasets::SmallWinogradConvolutionLayer5x5Dataset(),
                                                framework::dataset::make("DataType", { DataType::F32 })),
-                                       framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })),
-                               framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+                                               framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })),
+                                               framework::dataset::make("DataLayout", { DataLayout::NCHW })))
 
 {
     // Validate output
@@ -414,8 +634,8 @@
 FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradConvolutionLayerFastMathFixture, framework::DatasetMode::NIGHTLY,
                        combine(combine(combine(datasets::LargeWinogradConvolutionLayer5x5Dataset(),
                                                framework::dataset::make("DataType", { DataType::F32 })),
-                                       framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })),
-                               framework::dataset::make("DataLayout", { DataLayout::NCHW })))
+                                               framework::dataset::make("ActivationLayerInfo", { ActivationLayerInfo() })),
+                                               framework::dataset::make("DataLayout", { DataLayout::NCHW })))
 
 {
     // Validate output
@@ -424,7 +644,6 @@
 TEST_SUITE_END() // Conv5x5
 
 TEST_SUITE_END() // ConvolutionLayer
-
 TEST_SUITE_END() // Winograd
 TEST_SUITE_END() // CL
 } // namespace validation
diff --git a/tests/validation/Helpers.cpp b/tests/validation/Helpers.cpp
index e2415a2..ff69b1c 100644
--- a/tests/validation/Helpers.cpp
+++ b/tests/validation/Helpers.cpp
@@ -215,7 +215,7 @@
 template <typename T>
 void get_tile(const SimpleTensor<T> &in, SimpleTensor<T> &tile, const Coordinates &coord)
 {
-    ARM_COMPUTE_ERROR_ON(tile.shape().num_dimensions() != 2);
+    ARM_COMPUTE_ERROR_ON(tile.shape().num_dimensions() > 2);
 
     const int w_tile = tile.shape()[0];
     const int h_tile = tile.shape()[1];
@@ -272,7 +272,36 @@
     }
 }
 
+template <typename T>
+void zeros(SimpleTensor<T> &in, const Coordinates &anchor, const TensorShape &shape)
+{
+    ARM_COMPUTE_ERROR_ON(anchor.num_dimensions() != shape.num_dimensions());
+    ARM_COMPUTE_ERROR_ON(in.shape().num_dimensions() > 2);
+    ARM_COMPUTE_ERROR_ON(shape.num_dimensions() > 2);
+
+    // Check that the area to be zero-filled stays within the input tensor's bounds in every dimension
+    for(size_t d = 0; d < Coordinates::num_max_dimensions; ++d)
+    {
+        if(anchor[d] < 0 || ((anchor[d] + shape[d]) > in.shape()[d]))
+        {
+            ARM_COMPUTE_ERROR("anchor[d] < 0 || (anchor[d] + shape[d]) > in.shape()[d]");
+        }
+    }
+
+    // Get input pointer
+    auto in_ptr = static_cast<T *>(in(anchor[0] + anchor[1] * in.shape()[0]));
+
+    const unsigned int n = in.shape()[0];
+
+    for(unsigned int y = 0; y < shape[1]; ++y)
+    {
+        std::fill(in_ptr, in_ptr + shape[0], 0);
+        in_ptr += n;
+    }
+}
+
 template void get_tile(const SimpleTensor<float> &in, SimpleTensor<float> &roi, const Coordinates &coord);
+template void zeros(SimpleTensor<float> &in, const Coordinates &anchor, const TensorShape &shape);
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
index 49432d6..88262d5 100644
--- a/tests/validation/Helpers.h
+++ b/tests/validation/Helpers.h
@@ -259,6 +259,15 @@
  */
 template <typename T>
 void get_tile(const SimpleTensor<T> &in, SimpleTensor<T> &tile, const Coordinates &coord);
+
+/** Fill the area of the input tensor defined by anchor and shape with zeros
+ *
+ * @param[in,out] in     Input tensor to partially fill with zeros
+ * @param[in]     anchor Starting point of the zeros area
+ * @param[in]     shape  Shape (extent) of the zeros area
+ */
+template <typename T>
+void zeros(SimpleTensor<T> &in, const Coordinates &anchor, const TensorShape &shape);
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
index aca24f1..ac168eb 100644
--- a/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
+++ b/tests/validation/fixtures/WinogradConvolutionLayerFixture.h
@@ -259,7 +259,18 @@
             fill(bias, 2, 0.f, 0.f);
         }
 
-        WinogradInfo winograd_info(Size2D(4U, 4U),
+        // Set output tile
+        Size2D output_tile(4U, 4U);
+        if(weights_shape[0] == 1)
+        {
+            output_tile.width = 1;
+        }
+        else if(weights_shape[1] == 1)
+        {
+            output_tile.height = 1;
+        }
+
+        WinogradInfo winograd_info(output_tile,
                                    Size2D(weights_shape[0], weights_shape[1]),
                                    Size2D(input_shape[0], input_shape[1]),
                                    info,
diff --git a/tests/validation/reference/Winograd.cpp b/tests/validation/reference/Winograd.cpp
index 197d218..5be4fe2 100644
--- a/tests/validation/reference/Winograd.cpp
+++ b/tests/validation/reference/Winograd.cpp
@@ -29,6 +29,7 @@
 #include "arm_compute/core/Types.h"
 
 #include <algorithm>
+#include <cmath>
 
 namespace arm_compute
 {
@@ -142,12 +143,24 @@
     {
         { WinogradKey(std::pair<int, int>(2, 2), std::pair<int, int>(3, 3), WinogradTransformType::INPUT), imatrix2x2_3x3 },
         { WinogradKey(std::pair<int, int>(4, 4), std::pair<int, int>(3, 3), WinogradTransformType::INPUT), imatrix4x4_3x3 },
+        { WinogradKey(std::pair<int, int>(2, 1), std::pair<int, int>(3, 1), WinogradTransformType::INPUT), imatrix2x2_3x3 },
+        { WinogradKey(std::pair<int, int>(4, 1), std::pair<int, int>(3, 1), WinogradTransformType::INPUT), imatrix4x4_3x3 },
+        { WinogradKey(std::pair<int, int>(1, 2), std::pair<int, int>(1, 3), WinogradTransformType::INPUT), imatrix2x2_3x3 },
+        { WinogradKey(std::pair<int, int>(1, 4), std::pair<int, int>(1, 3), WinogradTransformType::INPUT), imatrix4x4_3x3 },
         { WinogradKey(std::pair<int, int>(4, 4), std::pair<int, int>(5, 5), WinogradTransformType::INPUT), imatrix4x4_5x5 },
         { WinogradKey(std::pair<int, int>(2, 2), std::pair<int, int>(3, 3), WinogradTransformType::FILTER), fmatrix2x2_3x3 },
         { WinogradKey(std::pair<int, int>(4, 4), std::pair<int, int>(3, 3), WinogradTransformType::FILTER), fmatrix4x4_3x3 },
+        { WinogradKey(std::pair<int, int>(2, 1), std::pair<int, int>(3, 1), WinogradTransformType::FILTER), fmatrix2x2_3x3 },
+        { WinogradKey(std::pair<int, int>(4, 1), std::pair<int, int>(3, 1), WinogradTransformType::FILTER), fmatrix4x4_3x3 },
+        { WinogradKey(std::pair<int, int>(1, 2), std::pair<int, int>(1, 3), WinogradTransformType::FILTER), fmatrix2x2_3x3 },
+        { WinogradKey(std::pair<int, int>(1, 4), std::pair<int, int>(1, 3), WinogradTransformType::FILTER), fmatrix4x4_3x3 },
         { WinogradKey(std::pair<int, int>(4, 4), std::pair<int, int>(5, 5), WinogradTransformType::FILTER), fmatrix4x4_5x5 },
         { WinogradKey(std::pair<int, int>(2, 2), std::pair<int, int>(3, 3), WinogradTransformType::OUTPUT), omatrix2x2_3x3 },
         { WinogradKey(std::pair<int, int>(4, 4), std::pair<int, int>(3, 3), WinogradTransformType::OUTPUT), omatrix4x4_3x3 },
+        { WinogradKey(std::pair<int, int>(2, 1), std::pair<int, int>(3, 1), WinogradTransformType::OUTPUT), omatrix2x2_3x3 },
+        { WinogradKey(std::pair<int, int>(4, 1), std::pair<int, int>(3, 1), WinogradTransformType::OUTPUT), omatrix4x4_3x3 },
+        { WinogradKey(std::pair<int, int>(1, 2), std::pair<int, int>(1, 3), WinogradTransformType::OUTPUT), omatrix2x2_3x3 },
+        { WinogradKey(std::pair<int, int>(1, 4), std::pair<int, int>(1, 3), WinogradTransformType::OUTPUT), omatrix4x4_3x3 },
         { WinogradKey(std::pair<int, int>(4, 4), std::pair<int, int>(5, 5), WinogradTransformType::OUTPUT), omatrix4x4_5x5 },
     };
 
@@ -175,6 +188,20 @@
 } // namespace
 
 template <typename T>
+void print_tile(SimpleTensor<T> &in)
+{
+    for(int y = 0; y < in.shape()[1]; y++)
+    {
+        for(int x = 0; x < in.shape()[0]; x++)
+        {
+            std::cout << in[x + y * in.shape()[0]] << " ";
+        }
+
+        std::cout << std::endl;
+    }
+}
+
+template <typename T>
 SimpleTensor<T> winograd_input_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info)
 {
     ARM_COMPUTE_ERROR_ON(in.data_layout() != DataLayout::NCHW);
@@ -189,7 +216,10 @@
     const unsigned int tile_w = output_tile_size.width + kernel_size.width - 1;
     const unsigned int tile_h = output_tile_size.height + kernel_size.height - 1;
 
-    TensorShape tile_dims(tile_w, tile_h);
+    // Get the maximum dimension from the tile size
+    const unsigned int tile_max_dim = std::max(tile_w, tile_h);
+
+    TensorShape tile_dims(tile_max_dim, tile_max_dim);
 
     // Simple tensor for the input tile
     SimpleTensor<T> src_tile{ tile_dims, in.data_type() };
@@ -217,11 +247,46 @@
     const int in_d        = in.shape().z();
     const int out_d       = out.shape().z();
     const int num_batches = in.shape().total_size() / (in_w * in_h * in_d);
-    const int num_tiles_x = std::ceil((in_w - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right()) / static_cast<float>(output_tile_size.width));
-    const int num_tiles_y = std::ceil((in_h - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom()) / static_cast<float>(output_tile_size.height));
     const int step_x      = output_tile_size.width;
     const int step_y      = output_tile_size.height;
 
+    // Compute the number of output tiles along the x and y direction of size "output_tile_size"
+    const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(in_w, in_h),
+                                                                kernel_size,
+                                                                output_tile_size,
+                                                                conv_info);
+
+    const int num_tiles_x = num_tiles.width;
+    const int num_tiles_y = num_tiles.height;
+
+    // In case of 1D convolution, the input tile has to be partially filled with zeros
+    int start_x_zero = 0;
+    int start_y_zero = 0;
+    int end_x_zero   = 0;
+    int end_y_zero   = 0;
+
+    if(output_tile_size.width == 1)
+    {
+        start_x_zero = 1;
+        start_y_zero = 0;
+        end_x_zero   = tile_max_dim - 1;
+        end_y_zero   = tile_max_dim;
+    }
+    else if(output_tile_size.height == 1)
+    {
+        start_x_zero = 0;
+        start_y_zero = 1;
+        end_x_zero   = tile_max_dim;
+        end_y_zero   = tile_max_dim - 1;
+    }
+
+    // Set the anchor and shape of the zeros area
+    const Coordinates anchor_zeros(start_x_zero, start_y_zero);
+    const TensorShape shape_zeros(end_x_zero, end_y_zero);
+
+    // If we have a vertical filter (i.e. 1x3, 1x5,..), we need to take the elements along the y direction (step = width of the transformed tile)
+    const int step_y_transf_tile = kernel_size.width == 1 ? tile_max_dim : 1;
+
     ARM_COMPUTE_ERROR_ON((num_tiles_x * num_tiles_y) != static_cast<int>(out.shape().y()));
 
     for(int b = 0; b < num_batches; ++b)
@@ -238,6 +303,9 @@
                     // Get the tile from the input tensor
                     get_tile(in, src_tile, Coordinates(xi, yi, z, b));
 
+                    // Fill partially with zeros in case of 1D convolution
+                    zeros(src_tile, anchor_zeros, shape_zeros);
+
                     // Compute the transformation
                     matrix_multiply(matrix, src_tile, tmp_tile);
                     matrix_multiply(tmp_tile, matrix_transposed, dst_tile);
@@ -247,7 +315,7 @@
                     {
                         int xo = z;
                         int yo = x + y * num_tiles_x;
-                        out[coords2index(out.shape(), Coordinates(xo, yo, i, b))] = dst_tile[i];
+                        out[coords2index(out.shape(), Coordinates(xo, yo, i, b))] = dst_tile[i * step_y_transf_tile];
                     }
                 }
             }
@@ -268,27 +336,31 @@
     const Size2D output_tile_size = winograd_info.output_tile_size;
     const Size2D kernel_size      = winograd_info.kernel_size;
 
-    TensorShape kernel_tile_dims(kernel_size.width, kernel_size.height);
-
     // Calculate dimensions for the tile
     const unsigned int input_tile_w    = output_tile_size.width + kernel_size.width - 1;
     const unsigned int input_tile_h    = output_tile_size.height + kernel_size.height - 1;
     const unsigned int input_tile_area = input_tile_w * input_tile_h;
 
+    // Get the maximum dimension from the filter size
+    const unsigned int kernel_max_dim = std::max(kernel_size.width, kernel_size.height);
+
+    // Get the maximum dimension from the input tile
+    const unsigned int input_tile_max_dim = std::max(input_tile_w, input_tile_h);
+
     // Simple tensor for the input tile
-    SimpleTensor<T> input_tile{ kernel_tile_dims, in.data_type(), 1 };
+    SimpleTensor<T> input_tile{ TensorShape(kernel_max_dim, kernel_max_dim), in.data_type(), 1 };
 
     // Simple tensor for the transformation matrix
-    SimpleTensor<T> trans_matrix{ TensorShape(kernel_tile_dims[0], input_tile_w), in.data_type(), 1 };
+    SimpleTensor<T> trans_matrix{ TensorShape(kernel_max_dim, input_tile_max_dim), in.data_type(), 1 };
 
     // Simple tensor for the transformation matrix transpose
-    SimpleTensor<T> trans_matrix_transposed{ TensorShape(input_tile_w, kernel_tile_dims[0]), in.data_type(), 1 };
+    SimpleTensor<T> trans_matrix_transposed{ TensorShape(input_tile_max_dim, kernel_max_dim), in.data_type(), 1 };
 
     // Simple tensor for the temporary tile
-    SimpleTensor<T> tmp_tile{ TensorShape(kernel_tile_dims[0], input_tile_w), in.data_type(), 1 };
+    SimpleTensor<T> tmp_tile{ TensorShape(kernel_max_dim, input_tile_max_dim), in.data_type(), 1 };
 
     // Simple tensor for the output tile
-    SimpleTensor<T> transf_tile{ TensorShape(input_tile_w, input_tile_w), in.data_type(), 1 };
+    SimpleTensor<T> transf_tile{ TensorShape(input_tile_max_dim, input_tile_max_dim), in.data_type(), 1 };
 
     // Initialize matrix for the filter transform
     initialize_matrix_transform(trans_matrix, output_tile_size, kernel_size, WinogradTransformType::FILTER);
@@ -300,6 +372,9 @@
     const int num_filters  = in.shape()[3];
     const int num_batches  = in.shape().total_size() / (kernel_size.area() * num_channels * num_filters);
 
+    // If we have a vertical filter (i.e. 1x3, 1x5,..), we need to take the elements along the y direction (step_y_transf_tile = width of the transformed tile)
+    const int step_y_transf_tile = kernel_size.width == 1 ? input_tile_max_dim : 1;
+
     for(int n = 0; n < num_batches; ++n)
     {
         for(int w = 0; w < num_filters; ++w)
@@ -321,7 +396,7 @@
                 // Store the values across the channels
                 for(unsigned int i = 0; i < input_tile_area; ++i)
                 {
-                    out[output_offset + i * num_filters * num_channels] = transf_tile[i];
+                    out[output_offset + i * num_filters * num_channels] = transf_tile[i * step_y_transf_tile];
                 }
             }
         }
@@ -350,15 +425,19 @@
     ARM_COMPUTE_ERROR_ON(in.shape()[2] != (in_tile_w * in_tile_h));
     ARM_COMPUTE_ERROR_ON(in.shape()[0] != out.shape()[get_data_layout_dimension_index(winograd_info.output_data_layout, DataLayoutDimension::CHANNEL)]);
 
+    // Get the maximum dimension from the tile size
+    const unsigned int in_tile_max_dim  = std::max(in_tile_w, in_tile_h);
+    const unsigned int out_tile_max_dim = std::max(output_tile_size.width, output_tile_size.height);
+
     // Compute tile dimensions
     // Input tile dimensions
-    TensorShape in_tile_dims(in_tile_w, in_tile_h);
+    TensorShape in_tile_dims(in_tile_max_dim, in_tile_max_dim);
 
     // Output tile dimensions
-    TensorShape out_tile_dims(output_tile_size.width, output_tile_size.height);
+    TensorShape out_tile_dims(out_tile_max_dim, out_tile_max_dim);
 
     // Transformation matrix dimensions
-    TensorShape tr_tile_dims(in_tile_w, output_tile_size.width);
+    TensorShape tr_tile_dims(in_tile_max_dim, out_tile_max_dim);
 
     // Create tensors
     // Simple tensor for the input tile
@@ -400,15 +479,24 @@
     const int stridez_out = stridey_out * h_out;
     const int stridew_out = stridez_out * c_out;
 
-    // Compute number of elements to process in the X and Y direction
-    const int num_elements_x = input_dimensions.width - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right();
-    const int num_elements_y = input_dimensions.height - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom();
-    const int num_tiles_x    = std::ceil(num_elements_x / static_cast<float>(output_tile_size.width));
-    const int num_tiles_y    = std::ceil(num_elements_y / static_cast<float>(output_tile_size.height));
+    // Compute the number of output tiles along the x and y direction of size "output_tile_size"
+    const Size2D num_tiles = compute_winograd_convolution_tiles(Size2D(input_dimensions.width, input_dimensions.height),
+                                                                kernel_size,
+                                                                output_tile_size,
+                                                                conv_info);
+
+    const int num_tiles_x = num_tiles.width;
+    const int num_tiles_y = num_tiles.height;
 
     ARM_COMPUTE_UNUSED(num_tiles_y);
     ARM_COMPUTE_ERROR_ON(in.shape()[1] != static_cast<unsigned int>(num_tiles_x * num_tiles_y));
 
+    // If we have a vertical filter (i.e. 1x3, 1x5,..), we still need to take the elements along the x direction (step_y_transf_tile = 1)
+    const int step_y_transf_tile = kernel_size.width == 1 ? 1 : output_tile.shape()[0];
+
+    // Initialize with zeros the input tile
+    zeros(input_tile, Coordinates(0, 0), input_tile.shape());
+
     for(int n = 0; n < num_batches; ++n)
     {
         for(int y = 0; y < h_in; ++y)
@@ -441,7 +529,7 @@
                         // Check out-of-bound writes
                         if((xo + xi < w_out) && (yo + yi < h_out))
                         {
-                            out[output_offset + yi * stridey_out + xi] = output_tile[xi + yi * out_tile_w];
+                            out[output_offset + yi * stridey_out + xi] = output_tile[xi + yi * step_y_transf_tile];
 
                             // Add bias
                             out[output_offset + yi * stridey_out + xi] += b[zo];