COMPMID-1013 - Create WinogradInfo data structure
COMPMID-1014 - Refactor the Winograd datasets

Change-Id: I6abdcbf9a90d663f4db666cd410afece9f1d034d
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/125899
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
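
Note: the WinogradInfo aggregate that this patch threads through the
datasets, fixtures and kernels is defined outside the files shown here
(COMPMID-1013 above). The sketch below is a rough reconstruction for
orientation only: the constructor argument order (output tile size,
kernel size, input dimensions, convolution info, data layout) is
confirmed by the call sites in the hunks that follow, while the field
names and the NCHW default are illustrative assumptions, not taken from
this patch.

    // Sketch of the WinogradInfo aggregate (COMPMID-1013); Size2D,
    // PadStrideInfo and DataLayout come from the arm_compute core headers.
    // Only the constructor argument order is confirmed by the call sites
    // in this patch; the field names below are assumptions.
    struct WinogradInfo
    {
        WinogradInfo(Size2D output_tile_sz, Size2D kernel_sz, Size2D input_dims, PadStrideInfo conv_info, DataLayout data_layout)
            : output_tile_size(output_tile_sz), kernel_size(kernel_sz), input_dimensions(input_dims), convolution_info(conv_info), output_data_layout(data_layout)
        {
        }

        Size2D        output_tile_size{};  // Width and height of the output tile
        Size2D        kernel_size{};       // Width and height of the 2D kernel
        Size2D        input_dimensions{};  // Width and height of the input tensor, pre-convolution
        PadStrideInfo convolution_info{};  // Pads and strides of the convolution
        DataLayout    output_data_layout{ DataLayout::NCHW }; // Data layout of the convolved output
    };

Arguments passed as Size2D() /* Not needed */, PadStrideInfo() /* Not
needed */ or DataLayout::NCHW /* Not needed */ at a call site below mark
fields that the transform being configured ignores; for example, the
filter transform only reads the output tile size and the kernel size.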
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index e939a6f..928ff73 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -372,6 +372,38 @@
     }
 };
 
+/** Data set containing small 3x3 tensor shapes. */
+class Small3x3Shapes final : public ShapeDataset
+{
+public:
+    Small3x3Shapes()
+        : ShapeDataset("Shape",
+    {
+        TensorShape{ 3U, 3U, 7U, 4U },
+        TensorShape{ 3U, 3U, 4U, 13U },
+        TensorShape{ 3U, 3U, 9U, 2U },
+        TensorShape{ 3U, 3U, 3U, 5U },
+    })
+    {
+    }
+};
+
+/** Data set containing large 3x3 tensor shapes. */
+class Large3x3Shapes final : public ShapeDataset
+{
+public:
+    Large3x3Shapes()
+        : ShapeDataset("Shape",
+    {
+        TensorShape{ 3U, 3U, 32U, 64U },
+        TensorShape{ 3U, 3U, 51U, 13U },
+        TensorShape{ 3U, 3U, 53U, 47U },
+        TensorShape{ 3U, 3U, 128U, 384U },
+    })
+    {
+    }
+};
+
 /** Data set containing small tensor shapes for deconvolution. */
 class SmallDeconvolutionShapes final : public ShapeDataset
 {
diff --git a/tests/datasets/WinogradFilterTransformDataset.h b/tests/datasets/WinogradFilterTransformDataset.h
deleted file mode 100644
index 07d0283..0000000
--- a/tests/datasets/WinogradFilterTransformDataset.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (c) 2018 ARM Limited.
- *
- * SPDX-License-Identifier: MIT
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef ARM_COMPUTE_TEST_WINOGRAD_FILTER_TRANSFORM_DATASET
-#define ARM_COMPUTE_TEST_WINOGRAD_FILTER_TRANSFORM_DATASET
-
-#include "utils/TypePrinter.h"
-
-#include "arm_compute/core/TensorShape.h"
-
-namespace arm_compute
-{
-namespace test
-{
-namespace datasets
-{
-class WinogradFilterTransformDataset
-{
-public:
-    using type = std::tuple<TensorShape, bool>;
-
-    struct iterator
-    {
-        iterator(std::vector<TensorShape>::const_iterator a_it,
-                 std::vector<bool>::const_iterator        is_nchw_it)
-            : _a_it{ std::move(a_it) },
-              _is_nchw_it{ std::move(is_nchw_it) }
-        {
-        }
-
-        std::string description() const
-        {
-            std::stringstream description;
-            description << "Input=" << *_a_it << ":";
-            description << "IsNCHW=" << *_is_nchw_it << ":";
-            return description.str();
-        }
-
-        WinogradFilterTransformDataset::type operator*() const
-        {
-            return std::make_tuple(*_a_it, *_is_nchw_it);
-        }
-
-        iterator &operator++()
-        {
-            ++_a_it;
-            ++_is_nchw_it;
-
-            return *this;
-        }
-
-    private:
-        std::vector<TensorShape>::const_iterator _a_it;
-        std::vector<bool>::const_iterator        _is_nchw_it;
-    };
-
-    iterator begin() const
-    {
-        return iterator(_a_shapes.begin(), _is_nchw.begin());
-    }
-
-    int size() const
-    {
-        return std::min(_a_shapes.size(), _is_nchw.size());
-    }
-
-    void add_config(TensorShape a, bool is_nchw)
-    {
-        _a_shapes.emplace_back(std::move(a));
-        _is_nchw.emplace_back(std::move(is_nchw));
-    }
-
-protected:
-    WinogradFilterTransformDataset()                                  = default;
-    WinogradFilterTransformDataset(WinogradFilterTransformDataset &&) = default;
-
-private:
-    std::vector<TensorShape> _a_shapes{};
-    std::vector<bool>        _is_nchw{};
-};
-
-class SmallWinogradFilterTransformDataset final : public WinogradFilterTransformDataset
-{
-public:
-    SmallWinogradFilterTransformDataset()
-    {
-        add_config(TensorShape(3U, 3U, 7U, 4U), true);
-        add_config(TensorShape(3U, 3U, 4U, 13U), true);
-        add_config(TensorShape(3U, 3U, 9U, 2U), true);
-        add_config(TensorShape(3U, 3U, 3U, 5U), true);
-    }
-};
-
-class LargeWinogradFilterTransformDataset final : public WinogradFilterTransformDataset
-{
-public:
-    LargeWinogradFilterTransformDataset()
-    {
-        add_config(TensorShape(3U, 3U, 32U, 64U), true);
-        add_config(TensorShape(3U, 3U, 51U, 13U), true);
-        add_config(TensorShape(3U, 3U, 53U, 47U), true);
-        add_config(TensorShape(3U, 3U, 128U, 384U), true);
-    }
-};
-} // namespace datasets
-} // namespace test
-} // namespace arm_compute
-#endif /* ARM_COMPUTE_TEST_WINOGRAD_FILTER_TRANSFORM_DATASET */
diff --git a/tests/datasets/WinogradInputTransformDataset.h b/tests/datasets/WinogradInputTransformDataset.h
index 07e41eb..625daa0 100644
--- a/tests/datasets/WinogradInputTransformDataset.h
+++ b/tests/datasets/WinogradInputTransformDataset.h
@@ -37,13 +37,12 @@
 class WinogradInputTransformDataset
 {
 public:
-    using type = std::tuple<TensorShape, PadStrideInfo, Size2D, bool>;
+    using type = std::tuple<TensorShape, WinogradInfo>;
 
     struct iterator
     {
-        iterator(std::vector<TensorShape>::const_iterator in_it, std::vector<PadStrideInfo>::const_iterator info_it, std::vector<Size2D>::const_iterator kernel_dims_it,
-                 std::vector<bool>::const_iterator format_it)
-            : _in_it{ std::move(in_it) }, _info_it{ std::move(info_it) }, _kernel_dims_it{ std::move(kernel_dims_it) }, _format_it{ std::move(format_it) }
+        iterator(std::vector<TensorShape>::const_iterator in_it, std::vector<WinogradInfo>::const_iterator info_it)
+            : _in_it{ std::move(in_it) }, _info_it{ std::move(info_it) }
         {
         }
 
@@ -51,50 +50,42 @@
         {
             std::stringstream description;
             description << "In=" << *_in_it << ":";
-            description << "Info=" << *_info_it;
-            description << "KernelDims=" << *_kernel_dims_it;
-            description << "IsNCHW=" << *_format_it;
+            description << "WinogradInfo=" << *_info_it;
             return description.str();
         }
 
         WinogradInputTransformDataset::type operator*() const
         {
-            return std::make_tuple(*_in_it, *_info_it, *_kernel_dims_it, *_format_it);
+            return std::make_tuple(*_in_it, *_info_it);
         }
 
         iterator &operator++()
         {
             ++_in_it;
             ++_info_it;
-            ++_kernel_dims_it;
-            ++_format_it;
 
             return *this;
         }
 
     private:
-        std::vector<TensorShape>::const_iterator   _in_it;
-        std::vector<PadStrideInfo>::const_iterator _info_it;
-        std::vector<Size2D>::const_iterator        _kernel_dims_it;
-        std::vector<bool>::const_iterator          _format_it;
+        std::vector<TensorShape>::const_iterator  _in_it;
+        std::vector<WinogradInfo>::const_iterator _info_it;
     };
 
     iterator begin() const
     {
-        return iterator(_in_shapes.begin(), _infos.begin(), _kernel_dims.begin(), _format.begin());
+        return iterator(_in_shapes.begin(), _infos.begin());
     }
 
     int size() const
     {
-        return std::min(_in_shapes.size(), std::min(_infos.size(), std::min(_kernel_dims.size(), _format.size())));
+        return std::min(_in_shapes.size(), _infos.size());
     }
 
-    void add_config(TensorShape in, PadStrideInfo info, Size2D kernel_dims, bool format)
+    void add_config(TensorShape in, WinogradInfo info)
     {
         _in_shapes.emplace_back(std::move(in));
         _infos.emplace_back(std::move(info));
-        _kernel_dims.emplace_back(std::move(kernel_dims));
-        _format.emplace_back(std::move(format));
     }
 
 protected:
@@ -102,10 +93,8 @@
     WinogradInputTransformDataset(WinogradInputTransformDataset &&) = default;
 
 private:
-    std::vector<TensorShape>   _in_shapes{};
-    std::vector<PadStrideInfo> _infos{};
-    std::vector<Size2D>        _kernel_dims{};
-    std::vector<bool>          _format{};
+    std::vector<TensorShape>  _in_shapes{};
+    std::vector<WinogradInfo> _infos{};
 };
 
 class SmallWinogradInputTransformDataset final : public WinogradInputTransformDataset
@@ -113,13 +102,13 @@
 public:
     SmallWinogradInputTransformDataset()
     {
-        add_config(TensorShape(9U, 9U), PadStrideInfo(1, 1, 1, 1), Size2D(3U, 3U), true);
-        add_config(TensorShape(27U, 13U, 2U), PadStrideInfo(1, 1, 0, 0), Size2D(3U, 3U), true);
-        add_config(TensorShape(128U, 64U, 1U, 3U), PadStrideInfo(1, 1, 1, 1), Size2D(3U, 3U), true);
-        add_config(TensorShape(9U, 9U, 3U, 4U), PadStrideInfo(1, 1, 0, 0), Size2D(3U, 3U), true);
-        add_config(TensorShape(27U, 13U, 2U, 4U), PadStrideInfo(1, 1, 1, 1), Size2D(3U, 3U), true);
-        add_config(TensorShape(9U, 9U, 3U, 5U), PadStrideInfo(1, 1, 0, 0), Size2D(3U, 3U), true);
-        add_config(TensorShape(14U, 14U, 512U, 2U), PadStrideInfo(1, 1, 1, 1), Size2D(3U, 3U), true);
+        add_config(TensorShape(9U, 9U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(9U, 9U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW));
+        add_config(TensorShape(27U, 13U, 2U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(27U, 13U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW));
+        add_config(TensorShape(128U, 64U, 1U, 3U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(128U, 64U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW));
+        add_config(TensorShape(9U, 9U, 3U, 4U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(9U, 9U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW));
+        add_config(TensorShape(27U, 13U, 2U, 4U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(27U, 13U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW));
+        add_config(TensorShape(9U, 9U, 3U, 5U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(9U, 9U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW));
+        add_config(TensorShape(14U, 14U, 512U, 2U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW));
     }
 };
 
@@ -128,10 +117,10 @@
 public:
     LargeWinogradInputTransformDataset()
     {
-        add_config(TensorShape(42U, 37U, 8U, 15U), PadStrideInfo(1, 1, 1, 1), Size2D(3U, 3U), true);
-        add_config(TensorShape(57U, 60U, 13U, 8U), PadStrideInfo(1, 1, 1, 1), Size2D(3U, 3U), true);
-        add_config(TensorShape(128U, 64U, 21U, 13U), PadStrideInfo(1, 1, 0, 0), Size2D(3U, 3U), true);
-        add_config(TensorShape(83U, 72U, 14U, 5U), PadStrideInfo(1, 1, 0, 0), Size2D(3U, 3U), true);
+        add_config(TensorShape(42U, 37U, 8U, 15U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(42U, 37U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW));
+        add_config(TensorShape(57U, 60U, 13U, 8U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(57U, 60U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW));
+        add_config(TensorShape(128U, 64U, 21U, 13U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(128U, 64U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW));
+        add_config(TensorShape(83U, 72U, 14U, 5U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(83U, 72U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW));
     }
 };
 } // namespace datasets
diff --git a/tests/datasets/WinogradOutputTransformDataset.h b/tests/datasets/WinogradOutputTransformDataset.h
index c42d6c8..35fce95 100644
--- a/tests/datasets/WinogradOutputTransformDataset.h
+++ b/tests/datasets/WinogradOutputTransformDataset.h
@@ -37,20 +37,14 @@
 class WinogradOutputTransformDataset
 {
 public:
-    using type = std::tuple<TensorShape, Size2D, Size2D, Size2D, DataLayout>;
+    using type = std::tuple<TensorShape, WinogradInfo>;
 
     struct iterator
     {
-        iterator(std::vector<TensorShape>::const_iterator a_it,
-                 std::vector<Size2D>::const_iterator      b_it,
-                 std::vector<Size2D>::const_iterator      c_it,
-                 std::vector<Size2D>::const_iterator      d_it,
-                 std::vector<DataLayout>::const_iterator  data_layout_it)
+        iterator(std::vector<TensorShape>::const_iterator  a_it,
+                 std::vector<WinogradInfo>::const_iterator info_it)
             : _a_it{ std::move(a_it) },
-              _b_it{ std::move(b_it) },
-              _c_it{ std::move(c_it) },
-              _d_it{ std::move(d_it) },
-              _data_layout_it{ std::move(data_layout_it) }
+              _info_it{ std::move(info_it) }
         {
         }
 
@@ -58,54 +52,42 @@
         {
             std::stringstream description;
             description << "Input=" << *_a_it << ":";
-            description << "KernelDims=" << *_b_it << ":";
-            description << "OutputDims=" << *_c_it << ":";
-            description << "NumTiles=" << *_d_it << ":";
-            description << "DataLayout=" << *_data_layout_it;
+            description << "WinogradInfo=" << *_info_it << ":";
             return description.str();
         }
 
         WinogradOutputTransformDataset::type operator*() const
         {
-            return std::make_tuple(*_a_it, *_b_it, *_c_it, *_d_it, *_data_layout_it);
+            return std::make_tuple(*_a_it, *_info_it);
         }
 
         iterator &operator++()
         {
             ++_a_it;
-            ++_b_it;
-            ++_c_it;
-            ++_d_it;
-            ++_data_layout_it;
+            ++_info_it;
 
             return *this;
         }
 
     private:
-        std::vector<TensorShape>::const_iterator _a_it;
-        std::vector<Size2D>::const_iterator      _b_it;
-        std::vector<Size2D>::const_iterator      _c_it;
-        std::vector<Size2D>::const_iterator      _d_it;
-        std::vector<DataLayout>::const_iterator  _data_layout_it;
+        std::vector<TensorShape>::const_iterator  _a_it;
+        std::vector<WinogradInfo>::const_iterator _info_it;
     };
 
     iterator begin() const
     {
-        return iterator(_a_shapes.begin(), _b_dims.begin(), _c_dims.begin(), _d_dims.begin(), _data_layout.begin());
+        return iterator(_a_shapes.begin(), _info.begin());
     }
 
     int size() const
     {
-        return std::min(_a_shapes.size(), std::min(_b_dims.size(), std::min(_c_dims.size(), std::min(_d_dims.size(), _data_layout.size()))));
+        return std::min(_a_shapes.size(), _info.size());
     }
 
-    void add_config(TensorShape a, Size2D b, Size2D c, Size2D d, DataLayout data_layout)
+    void add_config(TensorShape a, WinogradInfo info)
     {
         _a_shapes.emplace_back(std::move(a));
-        _b_dims.emplace_back(std::move(b));
-        _c_dims.emplace_back(std::move(c));
-        _d_dims.emplace_back(std::move(d));
-        _data_layout.emplace_back(std::move(data_layout));
+        _info.emplace_back(std::move(info));
     }
 
 protected:
@@ -113,11 +95,8 @@
     WinogradOutputTransformDataset(WinogradOutputTransformDataset &&) = default;
 
 private:
-    std::vector<TensorShape> _a_shapes{};
-    std::vector<Size2D>      _b_dims{};
-    std::vector<Size2D>      _c_dims{};
-    std::vector<Size2D>      _d_dims{};
-    std::vector<DataLayout>  _data_layout{};
+    std::vector<TensorShape>  _a_shapes{};
+    std::vector<WinogradInfo> _info{};
 };
 
 class SmallWinogradOutputTransformDataset final : public WinogradOutputTransformDataset
@@ -125,12 +104,12 @@
 public:
     SmallWinogradOutputTransformDataset()
     {
-        add_config(TensorShape(24U, 49U, 16U), Size2D(3, 3), Size2D(14U, 14U), Size2D(7U, 7U), DataLayout::NCHW);
-        add_config(TensorShape(13U, 6U, 16U), Size2D(3, 3), Size2D(5U, 4U), Size2D(3U, 2U), DataLayout::NCHW);
-        add_config(TensorShape(7U, 20U, 16U), Size2D(3, 3), Size2D(8U, 9U), Size2D(4U, 5U), DataLayout::NCHW);
-        add_config(TensorShape(24U, 49U, 16U, 3U), Size2D(3, 3), Size2D(14U, 14U), Size2D(7U, 7U), DataLayout::NCHW);
-        add_config(TensorShape(13U, 6U, 16U, 2U), Size2D(3, 3), Size2D(5U, 4U), Size2D(3U, 2U), DataLayout::NCHW);
-        add_config(TensorShape(7U, 20U, 16U, 5U), Size2D(3, 3), Size2D(8U, 9U), Size2D(4U, 5U), DataLayout::NCHW);
+        add_config(TensorShape(13U, 6U, 16U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(7U, 6U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW));
+        add_config(TensorShape(7U, 20U, 16U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(10U, 11U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW));
+        add_config(TensorShape(1U, 442U, 16U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(53U, 33U), PadStrideInfo(1, 1, 0, 1), DataLayout::NCHW));
+        add_config(TensorShape(7U, 12U, 16U, 3U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(8U, 10U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW));
+        add_config(TensorShape(24U, 49U, 16U, 2U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW));
+        add_config(TensorShape(7U, 12U, 16U, 5U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(8U, 10U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW));
     }
 };
 
@@ -139,12 +118,12 @@
 public:
     LargeWinogradOutputTransformDataset()
     {
-        add_config(TensorShape(128U, 3136U, 16U), Size2D(3, 3), Size2D(112U, 112U), Size2D(56U, 56U), DataLayout::NCHW);
-        add_config(TensorShape(256U, 784U, 16U), Size2D(3, 3), Size2D(55U, 55U), Size2D(28U, 28U), DataLayout::NCHW);
-        add_config(TensorShape(512U, 169U, 16U), Size2D(3, 3), Size2D(26U, 26U), Size2D(13U, 13U), DataLayout::NCHW);
-        add_config(TensorShape(128U, 3136U, 16U, 3U), Size2D(3, 3), Size2D(112U, 112U), Size2D(56U, 56U), DataLayout::NCHW);
-        add_config(TensorShape(256U, 784U, 16U, 2U), Size2D(3, 3), Size2D(55U, 55U), Size2D(28U, 28U), DataLayout::NCHW);
-        add_config(TensorShape(512U, 169U, 16U, 5U), Size2D(3, 3), Size2D(26U, 26U), Size2D(13U, 13U), DataLayout::NCHW);
+        add_config(TensorShape(64U, 12544U, 16U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(224U, 224U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW));
+        add_config(TensorShape(32U, 3080U, 16U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(112U, 112U), PadStrideInfo(1, 1, 1, 0), DataLayout::NCHW));
+        add_config(TensorShape(13U, 756U, 16U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(56U, 56U), PadStrideInfo(1, 1, 0, 1), DataLayout::NCHW));
+        add_config(TensorShape(64U, 12544U, 16U, 3U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(224U, 224U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW));
+        add_config(TensorShape(32U, 3080U, 16U, 2U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(112U, 112U), PadStrideInfo(1, 1, 1, 0), DataLayout::NCHW));
+        add_config(TensorShape(13U, 756U, 16U, 5U), WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(56U, 56U), PadStrideInfo(1, 1, 0, 1), DataLayout::NCHW));
     }
 };
 } // namespace datasets
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index 9aba8f7..8fa5826 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -35,7 +35,6 @@
 #include "tests/datasets/LargeConvolutionLayerDataset.h"
 #include "tests/datasets/ShapeDatasets.h"
 #include "tests/datasets/SmallConvolutionLayerDataset.h"
-#include "tests/datasets/WinogradFilterTransformDataset.h"
 #include "tests/datasets/WinogradInputTransformDataset.h"
 #include "tests/datasets/WinogradOutputTransformDataset.h"
 #include "tests/framework/Asserts.h"
@@ -64,7 +63,7 @@
 
 // *INDENT-OFF*
 // clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
                                                 framework::dataset::make("InputInfo",{
                                                                                         TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::F16),     // F16 not supported
                                                                                         TensorInfo(TensorShape(53U, 21U, 5U, 3U), 1, DataType::QASYMM8), // QASYMM8 not supported
@@ -83,44 +82,34 @@
                                                                                         TensorInfo(TensorShape(7U, 320U, 16U, 3U), 1, DataType::F32),
                                                                                         TensorInfo(TensorShape(37U, 304U, 16U), 1, DataType::F32)
                                                                                     })),
-                                                framework::dataset::make("PadStrideInfo", {
-                                                                                        PadStrideInfo(1, 1, 1, 0),
-                                                                                        PadStrideInfo(1, 1, 0, 0),
-                                                                                        PadStrideInfo(1, 1, 1, 1),
-                                                                                        PadStrideInfo(2, 1, 1, 1),
-                                                                                        PadStrideInfo(1, 1, 0, 1),
-                                                                                        PadStrideInfo(1, 1, 0, 0),
-                                                                                        PadStrideInfo(1, 1, 1, 1)
-                                                                                    })),
-                                                framework::dataset::make("KernelDims", {
-                                                                                        Size2D(3U, 3U),
-                                                                                        Size2D(3U, 3U),
-                                                                                        Size2D(5U, 5U),
-                                                                                        Size2D(3U, 3U),
-                                                                                        Size2D(3U, 3U),
-                                                                                        Size2D(3U, 3U),
-                                                                                        Size2D(3U, 3U)
+                                                framework::dataset::make("WinogradInfo", {
+                                                                                        WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(1, 1, 1, 0), DataLayout::NCHW),
+                                                                                        WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW),
+                                                                                        WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+                                                                                        WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 21U), PadStrideInfo(2, 1, 1, 1), DataLayout::NCHW),
+                                                                                        WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(53U, 33U), PadStrideInfo(1, 1, 0, 1), DataLayout::NCHW),
+                                                                                        WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(34U, 42U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW),
+                                                                                        WinogradInfo(Size2D(2, 2), Size2D(3, 3), Size2D(31U, 37U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW)
                                                                                     })),
                                                 framework::dataset::make("Expected", { false, false, false, false, false, false, false })),
-                                            input_info, output_info, conv_info, kernel_dims, expected)
+                                            input_info, output_info, winograd_info, expected)
 {
-    ARM_COMPUTE_EXPECT(bool(CLWinogradInputTransform::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), conv_info, kernel_dims)) == expected, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(bool(CLWinogradInputTransform::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
 }
 // clang-format on
 // *INDENT-ON*
 
 using CLWinogradInputTransformFixture = WinogradInputTransformValidationFixture<CLTensor, CLAccessor, CLWinogradInputTransform, float>;
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallWinogradInputTransformDataset(), datasets::LargeWinogradInputTransformDataset()),
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallWinogradInputTransformDataset(), datasets::LargeWinogradInputTransformDataset()),
+                                                                           framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                                                                    framework::dataset::make("DataType", { DataType::F32 })),
-               shape_in, conv_info, kernel_dims, is_nchw_format, data_type)
+               shape_in, winograd_info, data_layout, data_type)
 {
-    ARM_COMPUTE_UNUSED(is_nchw_format);
-
-    TensorShape shape_out = compute_winograd_input_transform_shape(TensorInfo(shape_in, 1, data_type), conv_info, kernel_dims);
+    TensorShape shape_out = compute_winograd_input_transform_shape(TensorInfo(shape_in, 1, data_type), winograd_info);
 
     // Create tensors
-    CLTensor in  = create_tensor<CLTensor>(shape_in, data_type);
+    CLTensor in  = create_tensor<CLTensor>(shape_in, data_type, 1, 0, QuantizationInfo(), data_layout);
     CLTensor out = create_tensor<CLTensor>(shape_out, data_type);
 
     ARM_COMPUTE_EXPECT(in.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -130,15 +119,19 @@
     CLWinogradInputTransform winograd_input_transform;
 
     // Configure the function
-    winograd_input_transform.configure(&in, &out, conv_info, kernel_dims);
+    winograd_input_transform.configure(&in, &out, winograd_info);
 }
 
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixture, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallWinogradInputTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradInputTransformFixture, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::SmallWinogradInputTransformDataset(),
+                                                                                                                     framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+                                                                                                             framework::dataset::make("DataType", { DataType::F32 })))
 {
     validate(CLAccessor(_target), _reference);
 }
 
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradInputTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradInputTransformFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeWinogradInputTransformDataset(),
+                                                                                                                   framework::dataset::make("DataLayout", { DataLayout::NCHW })),
+                                                                                                           framework::dataset::make("DataType", { DataType::F32 })))
 {
     validate(CLAccessor(_target), _reference);
 }
@@ -166,19 +159,19 @@
                                                                                         TensorInfo(TensorShape(2U, 37U, 16U), 1, DataType::F32),
                                                                                         TensorInfo(TensorShape(22U, 37U, 36U), 1, DataType::F32)
                                                                                     })),
-                                                framework::dataset::make("OutputTile", {
-                                                                                        Size2D(2U, 2U),
-                                                                                        Size2D(2U, 2U),
-                                                                                        Size2D(2U, 2U),
-                                                                                        Size2D(3U, 3U),
-                                                                                        Size2D(2U, 2U),
-                                                                                        Size2D(2U, 2U),
-                                                                                        Size2D(4U, 4U)
-                                                                                    })),
+                                                framework::dataset::make("WinogradInfo", {
+                                                                                          WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW  /* Not needed */ ),
+                                                                                          WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW  /* Not needed */ ),
+                                                                                          WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW  /* Not needed */ ),
+                                                                                          WinogradInfo(Size2D(3U, 3U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW  /* Not needed */ ),
+                                                                                          WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW  /* Not needed */ ),
+                                                                                          WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW  /* Not needed */ ),
+                                                                                          WinogradInfo(Size2D(4U, 4U), Size2D(3U, 3U), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW  /* Not needed */ )
+                                                                                         })),
                                                 framework::dataset::make("Expected", { false, false, false, false, true, true, true })),
-                                            input_info, output_info, output_tile, expected)
+                                            input_info, output_info, winograd_info, expected)
 {
-    ARM_COMPUTE_EXPECT(bool(CLWinogradFilterTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), output_tile)) == expected, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(bool(CLWinogradFilterTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
 }
 // clang-format on
 // *INDENT-ON*
@@ -186,36 +179,40 @@
 using CLWinogradFilterTransform        = CLSynthetizeFunctionWithZeroConstantBorder<CLWinogradFilterTransformKernel, 0>;
 using CLWinogradFilterTransformFixture = WinogradFilterTransformValidationFixture<CLTensor, CLAccessor, CLWinogradFilterTransform, float>;
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(framework::dataset::concat(datasets::SmallWinogradFilterTransformDataset(), datasets::LargeWinogradFilterTransformDataset()),
-                                                                           framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(combine(framework::dataset::concat(datasets::Small3x3Shapes(), datasets::Large3x3Shapes()),
+                                                                                   framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+                                                                           framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                                                                    framework::dataset::make("DataType", { DataType::F32 })),
-               shape_a, is_nchw_format, output_tile, data_type)
+               shape_a, output_tile, data_layout, data_type)
 {
-    ARM_COMPUTE_UNUSED(is_nchw_format);
+    WinogradInfo winograd_info(output_tile, Size2D(shape_a[0], shape_a[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
 
-    TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type), output_tile);
+    TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type), winograd_info);
 
     // Create tensors
-    CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
-    CLTensor b = create_tensor<CLTensor>(shape_b, data_type);
+    CLTensor a = create_tensor<CLTensor>(shape_a, data_type, 1, 0, QuantizationInfo(), data_layout);
+    CLTensor b = create_tensor<CLTensor>(shape_b, data_type, 1, 0, QuantizationInfo(), data_layout);
 
     ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
     ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
 
     // Create and configure function
     CLWinogradFilterTransform winograd_filter_transform;
-    winograd_filter_transform.configure(&a, &b, output_tile);
+    winograd_filter_transform.configure(&a, &b, winograd_info);
 }
 
-FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::ALL, combine(combine(datasets::SmallWinogradFilterTransformDataset(), framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::ALL, combine(combine(combine(datasets::Small3x3Shapes(),
+                                                                                                                        framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+                                                                                                                framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                                                                                                         framework::dataset::make("DataType", { DataType::F32 })))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_f32);
 }
 
-FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixture, framework::DatasetMode::NIGHTLY, combine(combine(datasets::LargeWinogradFilterTransformDataset(),
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixture, framework::DatasetMode::NIGHTLY, combine(combine(combine(datasets::Large3x3Shapes(),
                                                                                                                     framework::dataset::make("OutputTile", { Size2D(2U, 2U), Size2D(4U, 4U) })),
+                                                                                                                    framework::dataset::make("DataLayout", { DataLayout::NCHW })),
                                                                                                             framework::dataset::make("DataType", { DataType::F32 })))
 {
     // Validate output
@@ -227,65 +224,47 @@
 TEST_SUITE(OutputTransform)
 // *INDENT-OFF*
 // clang-format off
-DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(zip(zip(
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
                                                 framework::dataset::make("InputInfo",{
-                                                                                        TensorInfo(TensorShape(24U, 49U, 16U, 5U), 1, DataType::F16),        // F16 not supported
-                                                                                        TensorInfo(TensorShape(128U, 3136U, 16U, 5U), 1, DataType::QASYMM8), // QASYMM8 not supported
-                                                                                        TensorInfo(TensorShape(256U, 784U, 16U, 5U), 1, DataType::F32),      // Kernel size not supported
-                                                                                        TensorInfo(TensorShape(512U, 169U, 16U, 5U), 1, DataType::F32),      // Valid
-                                                                                        TensorInfo(TensorShape(13U, 6U, 16U, 4U), 1, DataType::F32),         // Padding needed
-                                                                                        TensorInfo(TensorShape(7U, 16U, 16U, 7U), 1, DataType::F32),         // Valid
-                                                                                        TensorInfo(TensorShape(1U, 442U, 16U, 37U), 1, DataType::F32)        // Wrong number of tiles
+                                                                                        TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F16),      // F16 not supported
+                                                                                        TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::QASYMM8),  // QASYMM8 not supported
+                                                                                        TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F32),      // Kernel size not supported
+                                                                                        TensorInfo(TensorShape(512U, 49U, 16U, 5U), 1, DataType::F32),      // Valid
+                                                                                        TensorInfo(TensorShape(13U, 108U, 16U, 4U), 1, DataType::F32),      // Padding needed
+                                                                                        TensorInfo(TensorShape(7U, 20U, 16U, 7U), 1, DataType::F32),        // Valid
+                                                                                        TensorInfo(TensorShape(7U, 20U, 16U, 7U), 1, DataType::F32)         // Wrong WinogradInfo
                                                                                     }),
                                                 framework::dataset::make("BiasInfo", {
-                                                                                        TensorInfo(TensorShape(24U), 1, DataType::F16),
-                                                                                        TensorInfo(TensorShape(128U), 1, DataType::QASYMM8),
-                                                                                        TensorInfo(TensorShape(256U), 1, DataType::F32),
+                                                                                        TensorInfo(TensorShape(512U), 1, DataType::F16),
+                                                                                        TensorInfo(TensorShape(512U), 1, DataType::QASYMM8),
+                                                                                        TensorInfo(TensorShape(512U), 1, DataType::F32),
                                                                                         TensorInfo(TensorShape(512U), 1, DataType::F32),
                                                                                         TensorInfo(TensorShape(13U), 1, DataType::F32),
                                                                                         TensorInfo(TensorShape(7U), 1, DataType::F32),
-                                                                                        TensorInfo(TensorShape(1U), 1, DataType::F32)
+                                                                                        TensorInfo(TensorShape(7U), 1, DataType::F32)
                                                                                     })),
                                                 framework::dataset::make("OutputInfo", {
-                                                                                        TensorInfo(TensorShape(14U, 14U, 24U, 5U), 1, DataType::F16),
-                                                                                        TensorInfo(TensorShape(112U, 112U, 128U, 5U), 1, DataType::QASYMM8),
-                                                                                        TensorInfo(TensorShape(55U, 55U, 256U, 5U), 1, DataType::F32),
-                                                                                        TensorInfo(TensorShape(26U, 26U, 512U, 5U), 1, DataType::F32),
-                                                                                        TensorInfo(TensorShape(5U, 4U, 13U, 4U), 1, DataType::F32),
-                                                                                        TensorInfo(TensorShape(8U, 8U, 7U, 7U), 1, DataType::F32),
-                                                                                        TensorInfo(TensorShape(51U, 33U, 1U, 37U), 1, DataType::F32)
+                                                                                        TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::F16),
+                                                                                        TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::QASYMM8),
+                                                                                        TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::F32),
+                                                                                        TensorInfo(TensorShape(14U, 14U, 512U, 5U), 1, DataType::F32),
+                                                                                        TensorInfo(TensorShape(17U, 23U, 13U, 4U), 1, DataType::F32),
+                                                                                        TensorInfo(TensorShape(8U, 10U, 7U, 7U), 1, DataType::F32),
+                                                                                        TensorInfo(TensorShape(7U, 9U, 7U, 7U), 1, DataType::F32)
                                                                                     })),
-                                                framework::dataset::make("KernelDims", {
-                                                                                        Size2D(3U, 3U),
-                                                                                        Size2D(3U, 3U),
-                                                                                        Size2D(5U, 5U),
-                                                                                        Size2D(3U, 3U),
-                                                                                        Size2D(3U, 3U),
-                                                                                        Size2D(3U, 3U),
-                                                                                        Size2D(3U, 3U)
-                                                                                    })),
-                                                framework::dataset::make("OutputDims", {
-                                                                                        Size2D(14U, 14U),
-                                                                                        Size2D(112U, 112U),
-                                                                                        Size2D(55U, 55U),
-                                                                                        Size2D(26U, 26U),
-                                                                                        Size2D(5U, 4U),
-                                                                                        Size2D(8U, 8U),
-                                                                                        Size2D(51U, 33U)
-                                                                                    })),
-                                                framework::dataset::make("NumTiles", {
-                                                                                        Size2D(7U, 7U),
-                                                                                        Size2D(56U, 56U),
-                                                                                        Size2D(28U, 28U),
-                                                                                        Size2D(13U, 13U),
-                                                                                        Size2D(3U, 2U),
-                                                                                        Size2D(4U, 4U),
-                                                                                        Size2D(26U, 16U)
+                                                framework::dataset::make("WinogradInfo", {
+                                                                                        WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+                                                                                        WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+                                                                                        WinogradInfo(Size2D(2U, 2U), Size2D(5U, 5U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+                                                                                        WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(14U, 14U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+                                                                                        WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(17U, 23U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+                                                                                        WinogradInfo(Size2D(2U, 2U), Size2D(3U, 3U), Size2D(8U, 10U), PadStrideInfo(1, 1, 1, 1), DataLayout::NCHW),
+                                                                                        WinogradInfo(Size2D(2U, 3U), Size2D(3U, 3U), Size2D(8U, 10U), PadStrideInfo(1, 1, 0, 0), DataLayout::NCHW),
                                                                                     })),
                                                 framework::dataset::make("Expected", { false, false, false, true, false, true, false })),
-                                            input_info, bias_info, output_info, kernel_dims, output_dims, num_tiles, expected)
+                                            input_info, bias_info, output_info, winograd_info, expected)
 {
-    ARM_COMPUTE_EXPECT(bool(CLWinogradOutputTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), kernel_dims, output_dims, num_tiles)) == expected, framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(bool(CLWinogradOutputTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &bias_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), winograd_info)) == expected, framework::LogLevel::ERRORS);
 }
 // clang-format on
 // *INDENT-ON*
@@ -295,9 +274,9 @@
 
 DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallWinogradOutputTransformDataset(), datasets::LargeWinogradOutputTransformDataset()),
                                                                    framework::dataset::make("DataType", { DataType::F32 })),
-               shape_a, kernel_dims, output_convolved_dims, num_tiles, data_layout, data_type)
+               shape_a, winograd_info, data_type)
 {
-    TensorShape shape_b = compute_winograd_output_transform_shape(TensorInfo(shape_a, 1, data_type), output_convolved_dims, data_layout);
+    TensorShape shape_b = compute_winograd_output_transform_shape(TensorInfo(shape_a, 1, data_type), winograd_info);
 
     // Create tensors
     CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
@@ -308,7 +287,7 @@
 
     // Create and configure function
     CLWinogradOutputTransform winograd_output_transform;
-    winograd_output_transform.configure(&a, nullptr, &b, kernel_dims, output_convolved_dims, num_tiles);
+    winograd_output_transform.configure(&a, nullptr, &b, winograd_info);
 }
 
 FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradOutputTransformFixture, framework::DatasetMode::ALL, combine(datasets::SmallWinogradOutputTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
diff --git a/tests/validation/fixtures/WinogradLayerFixture.h b/tests/validation/fixtures/WinogradLayerFixture.h
index 481eb93..17229ca 100644
--- a/tests/validation/fixtures/WinogradLayerFixture.h
+++ b/tests/validation/fixtures/WinogradLayerFixture.h
@@ -142,8 +142,9 @@
             fill(bias, 2, 0.f, 0.f);
         }
 
-        return (act_info.enabled()) ? reference::activation_layer<T>(reference::convolution_layer<T>(src, weights, bias, output_shape, info), act_info) : reference::convolution_layer<T>(src, weights, bias,
-                output_shape, info);
+        SimpleTensor<T> conv_out = reference::convolution_layer<T>(src, weights, bias, output_shape, info);
+
+        return (act_info.enabled()) ? reference::activation_layer<T>(conv_out, act_info) : conv_out;
     }
 
     TensorType      _target{};
@@ -155,12 +156,12 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape input_shape, PadStrideInfo conv_info, Size2D kernel_dims, bool is_nchw_format, DataType data_type)
+    void setup(TensorShape input_shape, WinogradInfo winograd_info, DataLayout data_layout, DataType data_type)
     {
-        TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), conv_info, kernel_dims);
+        TensorShape output_shape = compute_winograd_input_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
 
-        _target    = compute_target(input_shape, output_shape, conv_info, kernel_dims, is_nchw_format, data_type);
-        _reference = compute_reference(input_shape, output_shape, conv_info, kernel_dims, is_nchw_format, data_type);
+        _target    = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
+        _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
     }
 
 protected:
@@ -184,16 +185,14 @@
         }
     }
 
-    TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims, bool is_nchw_format, DataType data_type)
+    TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
     {
-        ARM_COMPUTE_UNUSED(is_nchw_format);
-
-        TensorType src = create_tensor<TensorType>(input_shape, data_type);
-        TensorType dst = create_tensor<TensorType>(output_shape, data_type);
+        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
+        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
 
         // Create and configure function
         FunctionType transf;
-        transf.configure(&src, &dst, conv_info, kernel_dims);
+        transf.configure(&src, &dst, winograd_info);
 
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -208,23 +207,21 @@
         // Fill tensors
         fill(AccessorType(src), 0, -1.f, 1.f);
 
-        // Compute CLWinogradInputTransform function
+        // Compute Winograd input transform function
         transf.run();
 
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims, bool is_nchw_format, DataType data_type)
+    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
     {
-        ARM_COMPUTE_UNUSED(is_nchw_format);
-
         // Create reference
-        SimpleTensor<T> src{ input_shape, data_type };
+        SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo(), data_layout };
 
         // Fill reference
         fill(src, 0, -1.f, 1.f);
 
-        return reference::winograd_input_transform<T>(src, output_shape, conv_info, kernel_dims);
+        return reference::winograd_input_transform<T>(src, output_shape, winograd_info);
     }
 
     TensorType      _target{};
@@ -236,12 +233,13 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape input_shape, bool is_nchw_format, Size2D output_tile, DataType data_type)
+    void setup(TensorShape input_shape, Size2D output_tile, DataLayout data_layout, DataType data_type)
     {
-        TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), output_tile);
+        WinogradInfo winograd_info(output_tile, Size2D(input_shape[0], input_shape[1]), Size2D() /* Not needed */, PadStrideInfo() /* Not needed */, DataLayout::NCHW /* Not needed */);
+        TensorShape  output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
 
-        _target    = compute_target(input_shape, output_shape, is_nchw_format, output_tile, data_type);
-        _reference = compute_reference(input_shape, output_shape, is_nchw_format, output_tile, data_type);
+        _target    = compute_target(input_shape, output_shape, winograd_info, data_layout, data_type);
+        _reference = compute_reference(input_shape, output_shape, winograd_info, data_layout, data_type);
     }
 
 protected:
@@ -265,17 +263,15 @@
         }
     }
 
-    TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, bool is_nchw_format, const Size2D &output_tile, DataType data_type)
+    TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
     {
-        ARM_COMPUTE_UNUSED(is_nchw_format);
-
         // Create tensors
-        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1);
-        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1);
+        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
+        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
 
         // Create and configure function
         FunctionType filter_transform;
-        filter_transform.configure(&src, &dst, output_tile);
+        filter_transform.configure(&src, &dst, winograd_info);
 
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -295,17 +291,15 @@
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, bool is_nchw_format, const Size2D &output_tile, DataType data_type)
+    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataLayout data_layout, DataType data_type)
     {
-        ARM_COMPUTE_UNUSED(is_nchw_format);
-
         // Create reference
-        SimpleTensor<T> src{ input_shape, data_type, 1 };
+        SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo(), data_layout };
 
         // Fill reference
         fill(src, 0, -1.f, 1.f);
 
-        return reference::winograd_filter_transform<T>(src, output_shape, output_tile);
+        return reference::winograd_filter_transform<T>(src, output_shape, winograd_info);
     }
 
     TensorType      _target{};
@@ -317,12 +311,12 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape input_shape, Size2D kernel_dims, Size2D output_convolved_dims, Size2D num_tiles, DataLayout data_layout, DataType data_type)
+    void setup(TensorShape input_shape, WinogradInfo winograd_info, DataType data_type)
     {
-        TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), output_convolved_dims, data_layout);
+        TensorShape output_shape = compute_winograd_output_transform_shape(TensorInfo(input_shape, 1, data_type), winograd_info);
 
-        _target    = compute_target(input_shape, output_shape, kernel_dims, output_convolved_dims, num_tiles, data_layout, data_type);
-        _reference = compute_reference(input_shape, output_shape, kernel_dims, output_convolved_dims, num_tiles, data_layout, data_type);
+        _target    = compute_target(input_shape, output_shape, winograd_info, data_type);
+        _reference = compute_reference(input_shape, output_shape, winograd_info, data_type);
     }
 
 protected:
@@ -346,16 +340,15 @@
         }
     }
 
-    TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const Size2D &kernel_dims, const Size2D &output_convolved_dims, Size2D &num_tiles, DataLayout data_layout,
-                              DataType data_type)
+    TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
     {
         // Create tensors
-        TensorType src = create_tensor<TensorType>(input_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
-        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), data_layout);
+        TensorType src = create_tensor<TensorType>(input_shape, data_type);
+        TensorType dst = create_tensor<TensorType>(output_shape, data_type, 1, 0, QuantizationInfo(), winograd_info.output_data_layout);
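+        // Note: the source keeps the default tensor layout, while the
+        // destination takes the layout requested through winograd_info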
 
         // Create and configure function
         FunctionType output_transform;
-        output_transform.configure(&src, nullptr, &dst, kernel_dims, output_convolved_dims, num_tiles);
+        output_transform.configure(&src, nullptr, &dst, winograd_info);
 
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -375,17 +368,15 @@
         return dst;
     }
 
-    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const Size2D &kernel_dims, const Size2D &output_convolved_dims, Size2D &num_tiles,
-                                      DataLayout data_layout,
-                                      DataType   data_type)
+    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, const WinogradInfo &winograd_info, DataType data_type)
     {
         // Create reference
-        SimpleTensor<T> src{ input_shape, data_type, 1, 0, QuantizationInfo(), data_layout };
+        SimpleTensor<T> src{ input_shape, data_type };
 
         // Fill reference
         fill(src, 0, -1.f, 1.f);
 
-        return reference::winograd_output_transform<T>(src, output_shape, kernel_dims, num_tiles);
+        return reference::winograd_output_transform<T>(src, output_shape, winograd_info);
     }
 
     TensorType      _target{};
diff --git a/tests/validation/reference/Winograd.cpp b/tests/validation/reference/Winograd.cpp
index ad0dcbd..604e252 100644
--- a/tests/validation/reference/Winograd.cpp
+++ b/tests/validation/reference/Winograd.cpp
@@ -28,6 +28,8 @@
 
 #include "arm_compute/core/Types.h"
 
+#include <algorithm>
+
 namespace arm_compute
 {
 namespace test
@@ -39,153 +41,155 @@
 namespace
 {
 template <typename T>
-void winograd_filter_transform3x3(const SimpleTensor<T> &in, SimpleTensor<T> &out, const Size2D &output_tile)
+void initialize_matrix_transform(SimpleTensor<T> &src, const Size2D &output_tile_size, const Size2D &kernel_size, WinogradTransformType winograd_transform_type)
 {
-    const bool         is_2x2      = (output_tile.width == 2);
-    const unsigned int transf_side = is_2x2 ? 4u : 6u;
+    ARM_COMPUTE_ERROR_ON((output_tile_size != Size2D(2U, 2U)) && (output_tile_size != Size2D(4U, 4U)));
+    ARM_COMPUTE_ERROR_ON(kernel_size != Size2D(3U, 3U));
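+
+    // Note (background): following Lavin & Gray, "Fast Algorithms for
+    // Convolutional Neural Networks", the 2D Winograd convolution of an input
+    // tile d with a filter g is Y = A^T * [ (G * g * G^T) o (B^T * d * B) ] * A,
+    // where "o" is the element-wise product. The arrays below hold B^T (input),
+    // G (filter) and A^T (output) for F(2x2, 3x3) and F(4x4, 3x3)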
 
-    // Simple tensor for the 3x3 input tile
-    SimpleTensor<T> input_tile{ TensorShape(3u, 3u), in.data_type(), 1 };
-
-    // Simple tensor for the transformation matrix
-    SimpleTensor<T> trans_matrix{ TensorShape(3u, transf_side), in.data_type(), 1 };
-
-    // Simple tensor for the transformation matrix transpose
-    SimpleTensor<T> trans_matrix_transposed{ TensorShape(transf_side, 3u), in.data_type(), 1 };
-
-    // Simple tensor for the 3xSide temporary tile
-    SimpleTensor<T> tmp_tile{ TensorShape(3u, transf_side), in.data_type(), 1 };
-
-    // Simple tensor for the SidexSide output tile
-    SimpleTensor<T> transf_tile{ TensorShape(transf_side, transf_side), in.data_type(), 1 };
-
-    if(is_2x2)
+    // Winograd input transform matrices
+    static const float imatrix2x2_3x3[] =
     {
-        // Initialize 3x4 transformation matrix
-        // 1   | 0   | 0
-        // 0.5 | 0.5 | 0.5
-        // 0.5 |-0.5 | 0.5
-        // 0   | 0   | 1
-        trans_matrix[0 + 0 * 3] = 1.0f;
-        trans_matrix[1 + 0 * 3] = 0.0f;
-        trans_matrix[2 + 0 * 3] = 0.0f;
-        trans_matrix[0 + 1 * 3] = 0.5f;
-        trans_matrix[1 + 1 * 3] = 0.5f;
-        trans_matrix[2 + 1 * 3] = 0.5f;
-        trans_matrix[0 + 2 * 3] = 0.5f;
-        trans_matrix[1 + 2 * 3] = -0.5f;
-        trans_matrix[2 + 2 * 3] = 0.5f;
-        trans_matrix[0 + 3 * 3] = 0.0f;
-        trans_matrix[1 + 3 * 3] = 0.0f;
-        trans_matrix[2 + 3 * 3] = 1.0f;
+        1.0f, 0.0f, -1.0f, 0.0f,
+        0.0f, 1.0f, 1.0f, 0.0f,
+        0.0f, -1.0f, 1.0f, 0.0f,
+        0.0f, 1.0f, 0.0f, -1.0f
+    };
+
+    static const float imatrix4x4_3x3[] =
+    {
+        4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f,
+        0.0f, -4.0f, -4.0f, 1.0f, 1.0f, 0.0f,
+        0.0f, 4.0f, -4.0f, -1.0f, 1.0f, 0.0f,
+        0.0f, -2.0f, -1.0f, 2.0f, 1.0f, 0.0f,
+        0.0f, 2.0f, -1.0f, -2.0f, 1.0f, 0.0f,
+        0.0f, 4.0f, 0.0f, -5.0f, 0.0f, 1.0f,
+    };
+
+    // ------------------------------------------
+
+    // Winograd filter transform matrices
+    static const float fmatrix2x2_3x3[] =
+    {
+        1.0f, 0.0f, 0.0f,
+        0.5f, 0.5f, 0.5f,
+        0.5f, -0.5f, 0.5f,
+        0.0f, 0.0f, 1.0f
+    };
+
+    static const float fmatrix4x4_3x3[] =
+    {
+        0.25f, 0.0f, 0.0f,
+        -1.0f / 6.0f, -1.0f / 6.0f, -1.0f / 6.0f,
+        -1.0f / 6.0f, 1.0f / 6.0f, -1.0f / 6.0f,
+        1.0f / 24.0f, 1.0f / 12.0f, 1.0f / 6.0f,
+        1.0f / 24.0f, -1.0f / 12.0f, 1.0f / 6.0f,
+        0.0f, 0.0f, 1.0f
+    };
+
+    // ------------------------------------------
+
+    // Winograd output transform matrices
+    static const float omatrix2x2_3x3[] =
+    {
+        1.0f, 1.0f, 1.0f, 0.0f,
+        0.0f, 1.0f, -1.0f, -1.0f
+    };
+
+    static const float omatrix4x4_3x3[] =
+    {
+        1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f,
+        0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f,
+        0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f,
+        0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f
+    };
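+
+    // Note: each A^T above maps the transformed (m + r - 1) x (m + r - 1) tile
+    // back to the m x m spatial output tile (m = output tile size, r = 3)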
+
+    // ------------------------------------------
+
+    using WinogradKey = std::tuple<std::pair<int, int>, std::pair<int, int>, WinogradTransformType>;
+
+    // Key = (Output tile size, Kernel size, Winograd transform type)
+    static const std::map<WinogradKey, const float *> matrix_map =
+    {
+        { WinogradKey(std::pair<int, int>(2, 2), std::pair<int, int>(3, 3), WinogradTransformType::INPUT), imatrix2x2_3x3 },
+        { WinogradKey(std::pair<int, int>(4, 4), std::pair<int, int>(3, 3), WinogradTransformType::INPUT), imatrix4x4_3x3 },
+        { WinogradKey(std::pair<int, int>(2, 2), std::pair<int, int>(3, 3), WinogradTransformType::FILTER), fmatrix2x2_3x3 },
+        { WinogradKey(std::pair<int, int>(4, 4), std::pair<int, int>(3, 3), WinogradTransformType::FILTER), fmatrix4x4_3x3 },
+        { WinogradKey(std::pair<int, int>(2, 2), std::pair<int, int>(3, 3), WinogradTransformType::OUTPUT), omatrix2x2_3x3 },
+        { WinogradKey(std::pair<int, int>(4, 4), std::pair<int, int>(3, 3), WinogradTransformType::OUTPUT), omatrix4x4_3x3 },
+    };
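+
+    // Note: supporting additional configurations (e.g. other kernel sizes)
+    // only requires new matrices and the matching entries in this map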
+
+    // Look up the transformation matrix for the requested configuration
+    auto it = matrix_map.find(WinogradKey(std::pair<int, int>(output_tile_size.width, output_tile_size.height),
+                                          std::pair<int, int>(kernel_size.width, kernel_size.height),
+                                          winograd_transform_type));
+
+    const float *matrix_values = nullptr;
+    if(it != matrix_map.end())
+    {
+        // Get matrix pointer
+        matrix_values = it->second;
     }
     else
     {
-        // Initialize 3x6 transformation matrix
-        //   1/4  |    0   |   0
-        //  -1/6  |  -1/6  | -1/6
-        //  -1/6  |   1/6  | -1/6
-        //  1/24  |  1/12  |  1/6
-        //  1/24  | -1/12  |  1/6
-        //    0   |    0   |   1
-        trans_matrix[0 + 0 * 3] = 1.0f / 4.0f;
-        trans_matrix[1 + 0 * 3] = 0.0f;
-        trans_matrix[2 + 0 * 3] = 0.0f;
-        trans_matrix[0 + 1 * 3] = -1.0f / 6.0f;
-        trans_matrix[1 + 1 * 3] = -1.0f / 6.0f;
-        trans_matrix[2 + 1 * 3] = -1.0f / 6.0f;
-        trans_matrix[0 + 2 * 3] = -1.0f / 6.0f;
-        trans_matrix[1 + 2 * 3] = 1.0f / 6.0f;
-        trans_matrix[2 + 2 * 3] = -1.0f / 6.0f;
-        trans_matrix[0 + 3 * 3] = 1.0f / 24.0f;
-        trans_matrix[1 + 3 * 3] = 1.0f / 12.0f;
-        trans_matrix[2 + 3 * 3] = 1.0f / 6.0f;
-        trans_matrix[0 + 4 * 3] = 1.0f / 24.0f;
-        trans_matrix[1 + 4 * 3] = -1.0f / 12.0f;
-        trans_matrix[2 + 4 * 3] = 1.0f / 6.0f;
-        trans_matrix[0 + 5 * 3] = 0.0f;
-        trans_matrix[1 + 5 * 3] = 0.0f;
-        trans_matrix[2 + 5 * 3] = 1.0f;
+        ARM_COMPUTE_ERROR("Winograd configuration not supported");
     }
 
-    // Transpose the transformation matrix
-    transpose_matrix(trans_matrix, trans_matrix_transposed);
-
-    const int num_channels = in.shape()[2];
-    const int num_filters  = in.shape()[3];
-    const int num_batches  = in.shape().total_size() / (9 * num_channels * num_filters);
-
-    for(int n = 0; n < num_batches; ++n)
-    {
-        for(int w = 0; w < num_filters; ++w)
-        {
-            for(int z = 0; z < num_channels; ++z)
-            {
-                // Load the 3x3 tile from the input tensor
-                get_tile(in, input_tile, Coordinates(0, 0, z, w, n));
-
-                // First transformation
-                matrix_multiply(trans_matrix, input_tile, tmp_tile);
-
-                // Second transformation
-                matrix_multiply(tmp_tile, trans_matrix_transposed, transf_tile);
-
-                // Store the 4x4 output tile across the 16 channels
-                const int output_offset = w + z * num_filters;
-
-                for(unsigned int out_h = 0, out_pos = 0; out_h < transf_side; ++out_h)
-                {
-                    for(unsigned int out_w = 0; out_w < transf_side; ++out_w, ++out_pos)
-                    {
-                        out[output_offset + out_pos * num_filters * num_channels] = transf_tile[out_w + out_h * transf_side];
-                    }
-                }
-            }
-        }
-    }
+    // Copy the matrix values into the destination tensor
+    std::copy(&matrix_values[0], &matrix_values[0] + src.num_elements(), &src[0]);
 }
+} // namespace
 
 template <typename T>
-void winograd_input_transform3x3(const SimpleTensor<T> &src, SimpleTensor<T> &dst, const PadStrideInfo &conv_info)
+SimpleTensor<T> winograd_input_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info)
 {
-    TensorShape shape4x4(4u, 4u);
+    ARM_COMPUTE_ERROR_ON(in.data_layout() != DataLayout::NCHW);
 
-    // Simple tensor for the 4x4 input tile
-    SimpleTensor<T> src_tile{ shape4x4, src.data_type() };
+    const PadStrideInfo conv_info        = winograd_info.convolution_info;
+    const Size2D        output_tile_size = winograd_info.output_tile_size;
+    const Size2D        kernel_size      = winograd_info.kernel_size;
 
-    // Simple tensor for the 4x4 temporary tile
-    SimpleTensor<T> tmp_tile{ shape4x4, src.data_type() };
+    SimpleTensor<T> out{ output_shape, in.data_type() };
 
-    // Simple tensor for the 4x4 output tile
-    SimpleTensor<T> dst_tile{ shape4x4, src.data_type() };
+    // Calculate dimensions for the tile
+    const unsigned int tile_w = output_tile_size.width + kernel_size.width - 1;
+    const unsigned int tile_h = output_tile_size.height + kernel_size.height - 1;
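+
+    // Note: F(m x m, r x r) Winograd reads (m + r - 1) x (m + r - 1) input
+    // tiles, e.g. 6x6 tiles for a 4x4 output tile with a 3x3 kernel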
+
+    TensorShape tile_dims(tile_w, tile_h);
+
+    // Simple tensor for the input tile
+    SimpleTensor<T> src_tile{ tile_dims, in.data_type() };
+
+    // Simple tensor for the temporary tile
+    SimpleTensor<T> tmp_tile{ tile_dims, in.data_type() };
+
+    // Simple tensor for the output tile
+    SimpleTensor<T> dst_tile{ tile_dims, in.data_type() };
 
     // Simple tensor for the transformation matrix
-    SimpleTensor<T> matrix{ shape4x4, src.data_type() };
+    SimpleTensor<T> matrix{ tile_dims, in.data_type() };
 
     // Simple tensor for the transformation matrix transposed
-    SimpleTensor<T> matrix_transposed{ shape4x4, src.data_type() };
+    SimpleTensor<T> matrix_transposed{ tile_dims, in.data_type() };
 
-    const float matrix_values[] = { 1.f, 0.f, -1.f, 0.f,
-                                    0.f, 1.f, 1.f, 0.f,
-                                    0.f, -1.f, 1.f, 0.f,
-                                    0.f, 1.f, 0.f, -1.f
-                                  };
+    // Initialize matrix for the input transform
+    initialize_matrix_transform(matrix, output_tile_size, kernel_size, WinogradTransformType::INPUT);
 
-    for(int i = 0; i < matrix.num_elements(); ++i)
-    {
-        matrix[i] = matrix_values[i];
-    }
-
+    // Transpose matrix
     transpose_matrix(matrix, matrix_transposed);
 
-    const int in_w        = src.shape().x();
-    const int in_h        = src.shape().y();
-    const int in_d        = src.shape().z();
-    const int num_batches = src.shape().total_size() / (in_w * in_h * in_d);
-    const int num_tiles_x = std::ceil((in_w - 2 + conv_info.pad_left() + conv_info.pad_right()) / 2.0f);
-    const int num_tiles_y = std::ceil((in_h - 2 + conv_info.pad_top() + conv_info.pad_bottom()) / 2.0f);
+    const int in_w        = in.shape().x();
+    const int in_h        = in.shape().y();
+    const int in_d        = in.shape().z();
+    const int out_d       = out.shape().z();
+    const int num_batches = in.shape().total_size() / (in_w * in_h * in_d);
+    const int num_tiles_x = std::ceil((in_w - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right()) / static_cast<float>(output_tile_size.width));
+    const int num_tiles_y = std::ceil((in_h - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom()) / static_cast<float>(output_tile_size.height));
+    const int step_x      = output_tile_size.width;
+    const int step_y      = output_tile_size.height;
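+
+    // e.g. for a 9x9 input plane with a 3x3 kernel, a 2x2 output tile and no
+    // padding: num_tiles_x = ceil((9 - 2) / 2.0f) = 4, with tiles read every
+    // step_x = 2 columns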
 
-    ARM_COMPUTE_ERROR_ON((num_tiles_x * num_tiles_y) != static_cast<int>(dst.shape().y()));
+    ARM_COMPUTE_ERROR_ON((num_tiles_x * num_tiles_y) != static_cast<int>(out.shape().y()));
 
     for(int b = 0; b < num_batches; ++b)
     {
@@ -195,61 +199,154 @@
             {
                 for(int x = 0; x < num_tiles_x; ++x)
                 {
-                    int xi = x * 2 - conv_info.pad_left();
-                    int yi = y * 2 - conv_info.pad_top();
+                    int xi = x * step_x - conv_info.pad_left();
+                    int yi = y * step_y - conv_info.pad_top();
 
-                    // Get the 4x4 tile from the input tensor
-                    get_tile(src, src_tile, Coordinates(xi, yi, z, b));
+                    // Get the tile from the input tensor
+                    get_tile(in, src_tile, Coordinates(xi, yi, z, b));
 
                     // Compute the transformation
                     matrix_multiply(matrix, src_tile, tmp_tile);
                     matrix_multiply(tmp_tile, matrix_transposed, dst_tile);
 
-                    // Store the 4x4 output tile across the 16 channels
-                    for(int i = 0; i < 16; ++i)
+                    // Store the output tile across the channels
+                    for(int i = 0; i < out_d; ++i)
                     {
                         int xo = z;
                         int yo = x + y * num_tiles_x;
-                        dst[coords2index(dst.shape(), Coordinates(xo, yo, i, b))] = dst_tile[i];
+                        out[coords2index(out.shape(), Coordinates(xo, yo, i, b))] = dst_tile[i];
                     }
                 }
             }
         }
     }
+
+    return out;
 }
 
 template <typename T>
-void winograd_output_transform3x3(const SimpleTensor<T> &in, SimpleTensor<T> &out, int num_tiles_x)
+SimpleTensor<T> winograd_filter_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info)
 {
-    ARM_COMPUTE_ERROR_ON(in.shape()[2] != 16);
-    ARM_COMPUTE_ERROR_ON(in.shape()[0] != out.shape()[2]);
+    ARM_COMPUTE_ERROR_ON_MSG(in.data_layout() != DataLayout::NCHW, "Only the NCHW data layout is supported");
 
-    // Simple tensor for the 3x3 input tile
-    SimpleTensor<T> input_tile{ TensorShape(4u, 4u), in.data_type(), 1 };
+    // Create reference
+    SimpleTensor<T> out{ output_shape, in.data_type(), 1 };
+
+    const Size2D output_tile_size = winograd_info.output_tile_size;
+    const Size2D kernel_size      = winograd_info.kernel_size;
+
+    TensorShape kernel_tile_dims(kernel_size.width, kernel_size.height);
+
+    // Calculate dimensions for the tile
+    const unsigned int input_tile_w    = output_tile_size.width + kernel_size.width - 1;
+    const unsigned int input_tile_h    = output_tile_size.height + kernel_size.height - 1;
+    const unsigned int input_tile_area = input_tile_w * input_tile_h;
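+
+    // e.g. for F(4x4, 3x3) each 3x3 kernel expands to a 6x6 transformed tile
+    // of input_tile_area = 36 values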
+
+    // Simple tensor for the input tile
+    SimpleTensor<T> input_tile{ kernel_tile_dims, in.data_type(), 1 };
 
     // Simple tensor for the transformation matrix
-    SimpleTensor<T> trans_matrix{ TensorShape(4u, 2u), in.data_type(), 1 };
+    SimpleTensor<T> trans_matrix{ TensorShape(kernel_tile_dims[0], input_tile_w), in.data_type(), 1 };
 
     // Simple tensor for the transformation matrix transpose
-    SimpleTensor<T> trans_matrix_transposed{ TensorShape(2u, 4u), in.data_type(), 1 };
+    SimpleTensor<T> trans_matrix_transposed{ TensorShape(input_tile_w, kernel_tile_dims[0]), in.data_type(), 1 };
 
-    // Simple tensor for the 4x3 temporary tile
-    SimpleTensor<T> tmp_tile{ TensorShape(4u, 2u), in.data_type(), 1 };
+    // Simple tensor for the temporary tile
+    SimpleTensor<T> tmp_tile{ TensorShape(kernel_tile_dims[0], input_tile_w), in.data_type(), 1 };
 
-    // Simple tensor for the 4x4 output tile
-    SimpleTensor<T> output_tile{ TensorShape(2u, 2u), in.data_type(), 1 };
+    // Simple tensor for the output tile
+    SimpleTensor<T> transf_tile{ TensorShape(input_tile_w, input_tile_w), in.data_type(), 1 };
 
-    // Initialize transformation matrix
-    // 1   | 1   | 1   | 1
-    // 0   | 1   | -1  | -1
-    trans_matrix[0 + 0 * 4] = 1.0f;
-    trans_matrix[1 + 0 * 4] = 1.0f;
-    trans_matrix[2 + 0 * 4] = 1.0f;
-    trans_matrix[3 + 0 * 4] = 0.0f;
-    trans_matrix[0 + 1 * 4] = 0.0f;
-    trans_matrix[1 + 1 * 4] = 1.0f;
-    trans_matrix[2 + 1 * 4] = -1.0f;
-    trans_matrix[3 + 1 * 4] = -1.0f;
+    // Initialize matrix for the filter transform
+    initialize_matrix_transform(trans_matrix, output_tile_size, kernel_size, WinogradTransformType::FILTER);
+
+    // Transpose the transformation matrix
+    transpose_matrix(trans_matrix, trans_matrix_transposed);
+
+    const int num_channels = in.shape()[2];
+    const int num_filters  = in.shape()[3];
+    const int num_batches  = in.shape().total_size() / (kernel_size.area() * num_channels * num_filters);
+
+    for(int n = 0; n < num_batches; ++n)
+    {
+        for(int w = 0; w < num_filters; ++w)
+        {
+            for(int z = 0; z < num_channels; ++z)
+            {
+                // Load the tile from the input tensor
+                get_tile(in, input_tile, Coordinates(0, 0, z, w, n));
+
+                // First transformation
+                matrix_multiply(trans_matrix, input_tile, tmp_tile);
+
+                // Second transformation
+                matrix_multiply(tmp_tile, trans_matrix_transposed, transf_tile);
+
+                // Compute the offset of the (filter, channel) pair; consecutive
+                // tile elements are strided by num_filters * num_channels
+                const int output_offset = w + z * num_filters;
+
+                // Store the transformed values across the channels
+                for(unsigned int i = 0; i < input_tile_area; ++i)
+                {
+                    out[output_offset + i * num_filters * num_channels] = transf_tile[i];
+                }
+            }
+        }
+    }
+
+    return out;
+}
+
+template <typename T>
+SimpleTensor<T> winograd_output_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(winograd_info.output_data_layout != DataLayout::NCHW, "Only the NCHW data layout is supported");
+
+    const PadStrideInfo conv_info        = winograd_info.convolution_info;
+    const Size2D        input_dimensions = winograd_info.input_dimensions;
+    const Size2D        output_tile_size = winograd_info.output_tile_size;
+    const Size2D        kernel_size      = winograd_info.kernel_size;
+
+    // Create reference
+    SimpleTensor<T> out{ output_shape, in.data_type(), 1 };
+
+    // Calculate dimensions for the tiles
+    const unsigned int in_tile_w  = output_tile_size.width + kernel_size.width - 1;
+    const unsigned int in_tile_h  = output_tile_size.height + kernel_size.height - 1;
+    const unsigned int out_tile_w = output_tile_size.width;
+    const unsigned int out_tile_h = output_tile_size.height;
+
+    ARM_COMPUTE_ERROR_ON(in.shape()[2] != (in_tile_w * in_tile_h));
+    ARM_COMPUTE_ERROR_ON(in.shape()[0] != out.shape()[2]);
+
+    // Input tile dimensions
+    TensorShape in_tile_dims(in_tile_w, in_tile_h);
+
+    // Output tile dimensions
+    TensorShape out_tile_dims(output_tile_size.width, output_tile_size.height);
+
+    // Transformation matrix dimensions
+    TensorShape tr_tile_dims(in_tile_w, output_tile_size.width);
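+
+    // Note: square output tiles are assumed here, so output_tile_size.width
+    // also serves as the matrix height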
+
+    // Simple tensor for the input tile
+    SimpleTensor<T> input_tile{ in_tile_dims, in.data_type(), 1 };
+
+    // Simple tensor for the transformation matrix
+    SimpleTensor<T> trans_matrix{ tr_tile_dims, in.data_type(), 1 };
+
+    // Simple tensor for the transformation matrix transpose
+    SimpleTensor<T> trans_matrix_transposed{ TensorShape(tr_tile_dims[1], tr_tile_dims[0]), in.data_type(), 1 };
+
+    // Simple tensor for the temporary tile
+    SimpleTensor<T> tmp_tile{ tr_tile_dims, in.data_type(), 1 };
+
+    // Simple tensor for the output tile
+    SimpleTensor<T> output_tile{ out_tile_dims, in.data_type(), 1 };
+
+    // Initialize matrix for the output transform
+    initialize_matrix_transform(trans_matrix, output_tile_size, kernel_size, WinogradTransformType::OUTPUT);
 
     // Transpose the transformation matrix
     transpose_matrix(trans_matrix, trans_matrix_transposed);
@@ -272,13 +369,22 @@
     const int stridez_out = stridey_out * h_out;
     const int stridew_out = stridez_out * c_out;
 
+    // Compute number of elements to process in the X and Y direction
+    const int num_elements_x = input_dimensions.width - (kernel_size.width - 1) + conv_info.pad_left() + conv_info.pad_right();
+    const int num_elements_y = input_dimensions.height - (kernel_size.height - 1) + conv_info.pad_top() + conv_info.pad_bottom();
+    const int num_tiles_x    = std::ceil(num_elements_x / static_cast<float>(output_tile_size.width));
+    const int num_tiles_y    = std::ceil(num_elements_y / static_cast<float>(output_tile_size.height));
+
+    ARM_COMPUTE_UNUSED(num_tiles_y);
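+    // The tile count must match the one produced by the input transform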
+    ARM_COMPUTE_ERROR_ON(in.shape()[1] != static_cast<unsigned int>(num_tiles_x * num_tiles_y));
+
     for(int n = 0; n < num_batches; ++n)
     {
         for(int y = 0; y < h_in; ++y)
         {
             for(int x = 0; x < w_in; ++x)
             {
-                // Load the 4x4 tile across the 16 channels of the input tensor
+                // Load the input tile across the channels of the input tensor
                 for(int z = 0; z < c_in; ++z)
                 {
                     input_tile[z] = in[x + (y * stridey_in) + (z * stridez_in) + (n * stridew_in)];
@@ -290,102 +396,34 @@
                 // Second transformation
                 matrix_multiply(tmp_tile, trans_matrix_transposed, output_tile);
 
-                // Store the 2x2 output tile
-                const int xo = (y % num_tiles_x) * 2;
-                const int yo = (y / num_tiles_x) * 2;
+                // Store the output tile
+                const int xo = (y % num_tiles_x) * out_tile_w;
+                const int yo = (y / num_tiles_x) * out_tile_h;
                 const int zo = x;
 
-                const int output_offset                  = xo + (yo * stridey_out) + (zo * stridez_out) + (n * stridew_out);
-                out[output_offset + 0 * stridey_out + 0] = output_tile[0 + 0 * 2];
+                const int output_offset = xo + (yo * stridey_out) + (zo * stridez_out) + (n * stridew_out);
 
-                // Check out-of-bound writes
-                if(xo + 1 < w_out)
+                for(int yi = 0; yi < static_cast<int>(out_tile_h); ++yi)
                 {
-                    out[output_offset + 0 * stridey_out + 1] = output_tile[1 + 0 * 2];
-                }
-
-                if(yo + 1 < h_out)
-                {
-                    out[output_offset + 1 * stridey_out + 0] = output_tile[0 + 1 * 2];
-                }
-
-                if((yo + 1 < h_out) && (xo + 1 < w_out))
-                {
-                    out[output_offset + 1 * stridey_out + 1] = output_tile[1 + 1 * 2];
+                    for(int xi = 0; xi < static_cast<int>(out_tile_w); ++xi)
+                    {
+                        // Check out-of-bound writes
+                        if((xo + xi < w_out) && (yo + yi < h_out))
+                        {
+                            out[output_offset + yi * stridey_out + xi] = output_tile[xi + yi * out_tile_w];
+                        }
+                    }
                 }
             }
         }
     }
-}
-} // namespace
-
-template <typename T>
-SimpleTensor<T> winograd_input_transform(const SimpleTensor<T> &src, const TensorShape &dst_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims)
-{
-    ARM_COMPUTE_ERROR_ON(kernel_dims.width != kernel_dims.height);
-    ARM_COMPUTE_ERROR_ON(src.data_layout() != DataLayout::NCHW);
-
-    SimpleTensor<T> dst{ dst_shape, src.data_type() };
-
-    switch(kernel_dims.width)
-    {
-        case 3:
-            winograd_input_transform3x3(src, dst, conv_info);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Only 3x3 kernels are supported");
-    }
-
-    return dst;
-}
-
-template <typename T>
-SimpleTensor<T> winograd_filter_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const Size2D &output_tile)
-{
-    ARM_COMPUTE_ERROR_ON_MSG(in.data_layout() != DataLayout::NCHW, "Only supported NCHW data format");
-
-    // Create reference
-    SimpleTensor<T> out{ output_shape, in.data_type(), 1 };
-
-    switch(in.shape()[0])
-    {
-        case 3:
-            winograd_filter_transform3x3(in, out, output_tile);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Only supported 3x3 kernel");
-            break;
-    }
 
     return out;
 }
 
-template <typename T>
-SimpleTensor<T> winograd_output_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const Size2D &kernel_dims, const Size2D &num_tiles)
-{
-    ARM_COMPUTE_ERROR_ON_MSG(in.data_layout() != DataLayout::NCHW, "Only supported NCHW data format");
-    ARM_COMPUTE_ERROR_ON(kernel_dims.width != kernel_dims.height);
-    ARM_COMPUTE_ERROR_ON(in.shape()[1] != num_tiles.area());
-
-    // Create reference
-    SimpleTensor<T> out{ output_shape, in.data_type(), 1 };
-
-    switch(kernel_dims.width)
-    {
-        case 3:
-            winograd_output_transform3x3(in, out, num_tiles.width);
-            break;
-        default:
-            ARM_COMPUTE_ERROR("Only supported 3x3 kernel");
-            break;
-    }
-
-    return out;
-}
-
-template SimpleTensor<float> winograd_input_transform(const SimpleTensor<float> &src, const TensorShape &dst_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims);
-template SimpleTensor<float> winograd_filter_transform(const SimpleTensor<float> &in, const TensorShape &output_shape, const Size2D &output_tile);
-template SimpleTensor<float> winograd_output_transform(const SimpleTensor<float> &in, const TensorShape &output_shape, const Size2D &kernel_dims, const Size2D &num_tiles);
+template SimpleTensor<float> winograd_filter_transform(const SimpleTensor<float> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info);
+template SimpleTensor<float> winograd_input_transform(const SimpleTensor<float> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info);
+template SimpleTensor<float> winograd_output_transform(const SimpleTensor<float> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/Winograd.h b/tests/validation/reference/Winograd.h
index 62e136b..29181f1 100644
--- a/tests/validation/reference/Winograd.h
+++ b/tests/validation/reference/Winograd.h
@@ -36,14 +36,22 @@
 {
 namespace reference
 {
-template <typename T>
-SimpleTensor<T> winograd_input_transform(const SimpleTensor<T> &src, const TensorShape &dst_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims);
+/** Winograd transform type */
+enum class WinogradTransformType
+{
+    INPUT,  /**< Winograd input transform */
+    FILTER, /**< Winograd filter transform */
+    OUTPUT  /**< Winograd output transform */
+};
 
 template <typename T>
-SimpleTensor<T> winograd_filter_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const Size2D &output_tile);
+SimpleTensor<T> winograd_input_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info);
 
 template <typename T>
-SimpleTensor<T> winograd_output_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const Size2D &kernel_dims, const Size2D &num_tiles);
+SimpleTensor<T> winograd_filter_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info);
+
+template <typename T>
+SimpleTensor<T> winograd_output_transform(const SimpleTensor<T> &in, const TensorShape &output_shape, const WinogradInfo &winograd_info);
 } // namespace reference
 } // namespace validation
 } // namespace test