COMPMID-935 - Implementing Convolution with Winograd on OpenCL (part 2)

Implemented Winograd Filter Transform 3x3 on OpenCL
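
For context, the filter transform added here corresponds to the Winograd
F(2x2, 3x3) scheme: each 3x3 filter tile g is expanded into a 4x4 tile
U = G * g * G^T, where G is the 4x3 transformation matrix used by the new
reference implementation:

        | 1.0   0.0   0.0 |
    G = | 0.5   0.5   0.5 |
        | 0.5  -0.5   0.5 |
        | 0.0   0.0   1.0 |

The 16 elements of each U tile are then scattered across the 16 planes of
the output tensor at offset (filter, channel), which is what the per-element
stores in winograd_filter_transform3x3() compute.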

Change-Id: I8f2b2dd938c5c000ef7ce392a37fb7b8b4202a4e
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/122708
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Jenkins <bsgcomp@arm.com>
diff --git a/tests/datasets/ShapeDatasets.h b/tests/datasets/ShapeDatasets.h
index 4b56370..e939a6f 100644
--- a/tests/datasets/ShapeDatasets.h
+++ b/tests/datasets/ShapeDatasets.h
@@ -238,6 +238,38 @@
     }
 };
 
+/** Data set containing medium 3D tensor shapes. */
+class Medium3DShapes final : public ShapeDataset
+{
+public:
+    Medium3DShapes()
+        : ShapeDataset("Shape",
+    {
+        TensorShape{ 42U, 37U, 8U },
+                     TensorShape{ 57U, 60U, 13U },
+                     TensorShape{ 128U, 64U, 21U },
+                     TensorShape{ 83U, 72U, 14U }
+    })
+    {
+    }
+};
+
+/** Data set containing medium 4D tensor shapes. */
+class Medium4DShapes final : public ShapeDataset
+{
+public:
+    Medium4DShapes()
+        : ShapeDataset("Shape",
+    {
+        TensorShape{ 42U, 37U, 8U, 15U },
+                     TensorShape{ 57U, 60U, 13U, 8U },
+                     TensorShape{ 128U, 64U, 21U, 13U },
+                     TensorShape{ 83U, 72U, 14U, 5U }
+    })
+    {
+    }
+};
+
 /** Data set containing large tensor shapes. */
 class LargeShapes final : public ShapeDataset
 {
diff --git a/tests/datasets/WinogradFilterTransformDataset.h b/tests/datasets/WinogradFilterTransformDataset.h
new file mode 100644
index 0000000..07d0283
--- /dev/null
+++ b/tests/datasets/WinogradFilterTransformDataset.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2018 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_TEST_WINOGRAD_FILTER_TRANSFORM_DATASET
+#define ARM_COMPUTE_TEST_WINOGRAD_FILTER_TRANSFORM_DATASET
+
+#include "utils/TypePrinter.h"
+
+#include "arm_compute/core/TensorShape.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class WinogradFilterTransformDataset
+{
+public:
+    using type = std::tuple<TensorShape, bool>;
+
+    struct iterator
+    {
+        iterator(std::vector<TensorShape>::const_iterator a_it,
+                 std::vector<bool>::const_iterator        is_nchw_it)
+            : _a_it{ std::move(a_it) },
+              _is_nchw_it{ std::move(is_nchw_it) }
+        {
+        }
+
+        std::string description() const
+        {
+            std::stringstream description;
+            description << "Input=" << *_a_it << ":";
+            description << "IsNCHW=" << *_is_nchw_it << ":";
+            return description.str();
+        }
+
+        WinogradFilterTransformDataset::type operator*() const
+        {
+            return std::make_tuple(*_a_it, *_is_nchw_it);
+        }
+
+        iterator &operator++()
+        {
+            ++_a_it;
+            ++_is_nchw_it;
+
+            return *this;
+        }
+
+    private:
+        std::vector<TensorShape>::const_iterator _a_it;
+        std::vector<bool>::const_iterator        _is_nchw_it;
+    };
+
+    iterator begin() const
+    {
+        return iterator(_a_shapes.begin(), _is_nchw.begin());
+    }
+
+    int size() const
+    {
+        return std::min(_a_shapes.size(), _is_nchw.size());
+    }
+
+    void add_config(TensorShape a, bool is_nchw)
+    {
+        _a_shapes.emplace_back(std::move(a));
+        _is_nchw.emplace_back(std::move(is_nchw));
+    }
+
+protected:
+    WinogradFilterTransformDataset()                                  = default;
+    WinogradFilterTransformDataset(WinogradFilterTransformDataset &&) = default;
+
+private:
+    std::vector<TensorShape> _a_shapes{};
+    std::vector<bool>        _is_nchw{};
+};
+
+class SmallWinogradFilterTransformDataset final : public WinogradFilterTransformDataset
+{
+public:
+    SmallWinogradFilterTransformDataset()
+    {
+        add_config(TensorShape(3U, 3U, 7U, 4U), true);
+        add_config(TensorShape(3U, 3U, 4U, 13U), true);
+        add_config(TensorShape(3U, 3U, 9U, 2U), true);
+        add_config(TensorShape(3U, 3U, 3U, 5U), true);
+    }
+};
+
+class LargeWinogradFilterTransformDataset final : public WinogradFilterTransformDataset
+{
+public:
+    LargeWinogradFilterTransformDataset()
+    {
+        add_config(TensorShape(3U, 3U, 32U, 64U), true);
+        add_config(TensorShape(3U, 3U, 51U, 13U), true);
+        add_config(TensorShape(3U, 3U, 53U, 47U), true);
+        add_config(TensorShape(3U, 3U, 128U, 384U), true);
+    }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_TEST_WINOGRAD_FILTER_TRANSFORM_DATASET */
diff --git a/tests/validation/CL/Winograd.cpp b/tests/validation/CL/Winograd.cpp
index 664b3f4..0b21ed2 100644
--- a/tests/validation/CL/Winograd.cpp
+++ b/tests/validation/CL/Winograd.cpp
@@ -18,15 +18,20 @@
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONCLCTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
+#include "arm_compute/core/CL/kernels/CLWinogradFilterTransformKernel.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/CLTensorAllocator.h"
 #include "arm_compute/runtime/CL/functions/CLWinogradInputTransform.h"
 #include "tests/CL/CLAccessor.h"
+#include "tests/CL/Helper.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/datasets/WinogradFilterTransformDataset.h"
 #include "tests/datasets/WinogradInputTransformDataset.h"
 #include "tests/framework/Asserts.h"
 #include "tests/framework/Macros.h"
@@ -40,6 +45,13 @@
 {
 namespace validation
 {
+namespace
+{
+constexpr AbsoluteTolerance<float> tolerance_f32(0.0001f);
+} // namespace
+
+using namespace arm_compute::misc::shape_calculator;
+
 TEST_SUITE(CL)
 TEST_SUITE(Winograd)
 
@@ -125,11 +137,76 @@
 {
     validate(CLAccessor(_target), _reference);
 }
+TEST_SUITE_END() // InputTransform
 
-TEST_SUITE_END()
+TEST_SUITE(FilterTransform)
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
+                                                framework::dataset::make("InputInfo",{
+                                                                                        TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::F16),     // F16 not supported
+                                                                                        TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::QASYMM8), // QASYMM8 not supported
+                                                                                        TensorInfo(TensorShape(5U, 5U, 5U, 3U), 1, DataType::F32),     // Kernel size not supported
+                                                                                        TensorInfo(TensorShape(3U, 3U), 1, DataType::F32),             // valid
+                                                                                        TensorInfo(TensorShape(3U, 3U, 5U, 3U), 1, DataType::F32),     // valid
+                                                                                        TensorInfo(TensorShape(3U, 3U, 37U, 2U), 1, DataType::F32),    // valid
+                                                                                        TensorInfo(TensorShape(3U, 3U, 37U, 22U), 1, DataType::F32)    // valid
+                                                                                    }),
+                                                framework::dataset::make("OutputInfo", {
+                                                                                        TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::F16),
+                                                                                        TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::QASYMM8),
+                                                                                        TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::F32),
+                                                                                        TensorInfo(TensorShape(1U, 1U, 16U), 1, DataType::F32),
+                                                                                        TensorInfo(TensorShape(3U, 5U, 16U), 1, DataType::F32),
+                                                                                        TensorInfo(TensorShape(2U, 37U, 16U), 1, DataType::F32),
+                                                                                        TensorInfo(TensorShape(22U, 37U, 16U), 1, DataType::F32)
+                                                                                    })),
+                                                framework::dataset::make("Expected", { false, false, false, true, true, true, true })),
+                                            input_info, output_info, expected)
+{
+    ARM_COMPUTE_EXPECT(bool(CLWinogradFilterTransformKernel::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
 
-TEST_SUITE_END()
-TEST_SUITE_END()
+using CLWinogradFilterTransform        = CLSynthetizeFunctionWithZeroConstantBorder<CLWinogradFilterTransformKernel, 0>;
+using CLWinogradFilterTransformFixture = WinogradFilterTransformValidationFixture<CLTensor, CLAccessor, CLWinogradFilterTransform, float>;
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallWinogradFilterTransformDataset(), datasets::LargeWinogradFilterTransformDataset()),
+                                                                   framework::dataset::make("DataType", { DataType::F32 })),
+               shape_a, is_nchw_format, data_type)
+{
+    ARM_COMPUTE_UNUSED(is_nchw_format);
+
+    TensorShape shape_b = compute_winograd_filter_transform_shape(TensorInfo(shape_a, 1, data_type));
+
+    // Create tensors
+    CLTensor a = create_tensor<CLTensor>(shape_a, data_type);
+    CLTensor b = create_tensor<CLTensor>(shape_b, data_type);
+
+    ARM_COMPUTE_EXPECT(a.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(b.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLWinogradFilterTransform winograd_filter_transform;
+    winograd_filter_transform.configure(&a, &b);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLWinogradFilterTransformFixture, framework::DatasetMode::ALL, combine(datasets::SmallWinogradFilterTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLWinogradFilterTransformFixture, framework::DatasetMode::NIGHTLY, combine(datasets::LargeWinogradFilterTransformDataset(), framework::dataset::make("DataType", { DataType::F32 })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // FilterTransform
+
+TEST_SUITE_END() // Winograd
+TEST_SUITE_END() // CL
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/Helpers.h b/tests/validation/Helpers.h
old mode 100755
new mode 100644
diff --git a/tests/validation/fixtures/WinogradLayerFixture.h b/tests/validation/fixtures/WinogradLayerFixture.h
index 95e3315..bfe1efc 100644
--- a/tests/validation/fixtures/WinogradLayerFixture.h
+++ b/tests/validation/fixtures/WinogradLayerFixture.h
@@ -27,7 +27,6 @@
 #include "arm_compute/core/TensorShape.h"
 #include "arm_compute/core/Types.h"
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/runtime/NEON/NEScheduler.h"
 #include "tests/AssetsLibrary.h"
 #include "tests/Globals.h"
 #include "tests/IAccessor.h"
@@ -42,8 +41,6 @@
 
 namespace arm_compute
 {
-class NEWinogradLayer;
-
 namespace test
 {
 namespace validation
@@ -224,6 +221,87 @@
     TensorType      _target{};
     SimpleTensor<T> _reference{};
 };
+
+template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
+class WinogradFilterTransformValidationFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape input_shape, bool is_nchw_format, DataType data_type)
+    {
+        TensorShape output_shape = compute_winograd_filter_transform_shape(TensorInfo(input_shape, 1, data_type));
+
+        _target    = compute_target(input_shape, output_shape, is_nchw_format, data_type);
+        _reference = compute_reference(input_shape, output_shape, is_nchw_format, data_type);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor, int i, float min, float max)
+    {
+        switch(tensor.data_type())
+        {
+            case DataType::F32:
+            {
+                std::uniform_real_distribution<> distribution(min, max);
+                library->fill(tensor, distribution, i);
+                break;
+            }
+            default:
+            {
+                ARM_COMPUTE_ERROR("Not supported");
+                library->fill_tensor_uniform(tensor, i);
+                break;
+            }
+        }
+    }
+
+    TensorType compute_target(const TensorShape &input_shape, const TensorShape &output_shape, bool is_nchw_format, DataType data_type)
+    {
+        ARM_COMPUTE_UNUSED(is_nchw_format);
+
+        // Create tensors
+        TensorType src = create_tensor<TensorType>(input_shape, data_type);
+        TensorType dst = create_tensor<TensorType>(output_shape, data_type);
+
+        // Create and configure function
+        FunctionType filter_transform;
+        filter_transform.configure(&src, &dst);
+
+        ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Allocate tensors
+        src.allocator()->allocate();
+        dst.allocator()->allocate();
+
+        ARM_COMPUTE_EXPECT(!src.info()->is_resizable(), framework::LogLevel::ERRORS);
+        ARM_COMPUTE_EXPECT(!dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+        // Fill tensors
+        fill(AccessorType(src), 0, -1.f, 1.f);
+
+        filter_transform.run();
+
+        return dst;
+    }
+
+    SimpleTensor<T> compute_reference(const TensorShape &input_shape, const TensorShape &output_shape, bool is_nchw_format, DataType data_type)
+    {
+        ARM_COMPUTE_ERROR_ON(!is_nchw_format);
+
+        // Create reference
+        SimpleTensor<T> src{ input_shape, data_type, 1 };
+
+        // Fill reference
+        fill(src, 0, -1.f, 1.f);
+
+        return reference::winograd_filter_transform<T>(src, output_shape);
+    }
+
+    TensorType      _target{};
+    SimpleTensor<T> _reference{};
+};
 } // namespace validation
 } // namespace test
 } // namespace arm_compute
diff --git a/tests/validation/reference/Winograd.cpp b/tests/validation/reference/Winograd.cpp
index 371bb63..3ed55fb 100644
--- a/tests/validation/reference/Winograd.cpp
+++ b/tests/validation/reference/Winograd.cpp
@@ -26,6 +26,8 @@
 #include "tests/validation/Helpers.h"
 #include "tests/validation/reference/Utils.h"
 
+#include "arm_compute/core/Types.h"
+
 namespace arm_compute
 {
 namespace test
@@ -108,6 +110,87 @@
         }
     }
 }
+
+template <typename T>
+void winograd_filter_transform3x3(const SimpleTensor<T> &in, SimpleTensor<T> &out)
+{
+    // Simple tensor for the 3x3 input tile
+    SimpleTensor<T> input_tile{ TensorShape(3u, 3u), in.data_type(), 1 };
+
+    // Simple tensor for the transformation matrix
+    SimpleTensor<T> trans_matrix{ TensorShape(3u, 4u), in.data_type(), 1 };
+
+    // Simple tensor for the transformation matrix transpose
+    SimpleTensor<T> trans_matrix_transposed{ TensorShape(4u, 3u), in.data_type(), 1 };
+
+    // Simple tensor for the 4x3 temporary tile
+    SimpleTensor<T> tmp_tile{ TensorShape(3u, 4u), in.data_type(), 1 };
+
+    // Simple tensor for the 4x4 output tile
+    SimpleTensor<T> output_tile{ TensorShape(4u, 4u), in.data_type(), 1 };
+
+    // Initialize transformation matrix
+    // 1   | 0   | 0
+    // 0.5 | 0.5 | 0.5
+    // 0.5 |-0.5 | 0.5
+    // 0   | 0   | 1
+    trans_matrix[0 + 0 * 3] = 1.0f;
+    trans_matrix[1 + 0 * 3] = 0.0f;
+    trans_matrix[2 + 0 * 3] = 0.0f;
+    trans_matrix[0 + 1 * 3] = 0.5f;
+    trans_matrix[1 + 1 * 3] = 0.5f;
+    trans_matrix[2 + 1 * 3] = 0.5f;
+    trans_matrix[0 + 2 * 3] = 0.5f;
+    trans_matrix[1 + 2 * 3] = -0.5f;
+    trans_matrix[2 + 2 * 3] = 0.5f;
+    trans_matrix[0 + 3 * 3] = 0.0f;
+    trans_matrix[1 + 3 * 3] = 0.0f;
+    trans_matrix[2 + 3 * 3] = 1.0f;
+
+    // Transpose the transformation matrix
+    transpose_matrix(trans_matrix, trans_matrix_transposed);
+
+    const int num_channels = in.shape()[2];
+    const int num_filters  = in.shape()[3];
+    const int num_batches  = in.shape().total_size() / (9 * num_channels * num_filters);
+
+    for(int n = 0; n < num_batches; ++n)
+    {
+        for(int w = 0; w < num_filters; ++w)
+        {
+            for(int z = 0; z < num_channels; ++z)
+            {
+                // Load the 3x3 tile from the input tensor
+                get_tile(in, input_tile, Coordinates(0, 0, z, w, n));
+
+                // First transformation
+                matrix_multiply(trans_matrix, input_tile, tmp_tile);
+
+                // Second transformation
+                matrix_multiply(tmp_tile, trans_matrix_transposed, output_tile);
+
+                // Store the 4x4 output tile across the 16 channels
+                const int output_offset                              = w + z * num_filters;
+                out[output_offset + 0 * num_filters * num_channels]  = output_tile[0 + 0 * 4];
+                out[output_offset + 1 * num_filters * num_channels]  = output_tile[1 + 0 * 4];
+                out[output_offset + 2 * num_filters * num_channels]  = output_tile[2 + 0 * 4];
+                out[output_offset + 3 * num_filters * num_channels]  = output_tile[3 + 0 * 4];
+                out[output_offset + 4 * num_filters * num_channels]  = output_tile[0 + 1 * 4];
+                out[output_offset + 5 * num_filters * num_channels]  = output_tile[1 + 1 * 4];
+                out[output_offset + 6 * num_filters * num_channels]  = output_tile[2 + 1 * 4];
+                out[output_offset + 7 * num_filters * num_channels]  = output_tile[3 + 1 * 4];
+                out[output_offset + 8 * num_filters * num_channels]  = output_tile[0 + 2 * 4];
+                out[output_offset + 9 * num_filters * num_channels]  = output_tile[1 + 2 * 4];
+                out[output_offset + 10 * num_filters * num_channels] = output_tile[2 + 2 * 4];
+                out[output_offset + 11 * num_filters * num_channels] = output_tile[3 + 2 * 4];
+                out[output_offset + 12 * num_filters * num_channels] = output_tile[0 + 3 * 4];
+                out[output_offset + 13 * num_filters * num_channels] = output_tile[1 + 3 * 4];
+                out[output_offset + 14 * num_filters * num_channels] = output_tile[2 + 3 * 4];
+                out[output_offset + 15 * num_filters * num_channels] = output_tile[3 + 3 * 4];
+            }
+        }
+    }
+}
 } // namespace
 
 template <typename T>
@@ -130,7 +213,29 @@
     return dst;
 }
 
+template <typename T>
+SimpleTensor<T> winograd_filter_transform(const SimpleTensor<T> &in, const TensorShape &output_shape)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(in.data_layout() != DataLayout::NCHW, "Only NCHW data format is supported");
+
+    // Create reference
+    SimpleTensor<T> out{ output_shape, in.data_type(), 1 };
+
+    switch(in.shape()[0])
+    {
+        case 3:
+            winograd_filter_transform3x3(in, out);
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Only 3x3 kernels are supported");
+            break;
+    }
+
+    return out;
+}
+
 template SimpleTensor<float> winograd_input_transform(const SimpleTensor<float> &src, const TensorShape &dst_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims);
+template SimpleTensor<float> winograd_filter_transform(const SimpleTensor<float> &in, const TensorShape &output_shape);
 } // namespace reference
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/reference/Winograd.h b/tests/validation/reference/Winograd.h
index ed95239..ba8e5c1 100644
--- a/tests/validation/reference/Winograd.h
+++ b/tests/validation/reference/Winograd.h
@@ -24,6 +24,8 @@
 #ifndef __ARM_COMPUTE_TEST_WINOGRAD_H__
 #define __ARM_COMPUTE_TEST_WINOGRAD_H__
 
+#include "arm_compute/core/TensorShape.h"
+
 #include "tests/SimpleTensor.h"
 
 namespace arm_compute
@@ -36,6 +38,9 @@
 {
 template <typename T>
 SimpleTensor<T> winograd_input_transform(const SimpleTensor<T> &src, const TensorShape &dst_shape, const PadStrideInfo &conv_info, const Size2D &kernel_dims);
+
+template <typename T>
+SimpleTensor<T> winograd_filter_transform(const SimpleTensor<T> &in, const TensorShape &output_shape);
 } // namespace reference
 } // namespace validation
 } // namespace test