Fix for AI benchmark ResNet regression

* For 3x3 kernels, only choose the implementation with the larger tile
  size if the input tensor is larger than the tile.

Resolves: COMPMID-5467
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I2cf95ddb25f477cb05da3b3501e0afe9548fc33a
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8022
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/NEON/kernels/convolution/winograd/output_transforms_fp32.cpp b/src/core/NEON/kernels/convolution/winograd/output_transforms_fp32.cpp
index 87ad4b2..73abe8b 100644
--- a/src/core/NEON/kernels/convolution/winograd/output_transforms_fp32.cpp
+++ b/src/core/NEON/kernels/convolution/winograd/output_transforms_fp32.cpp
@@ -45,7 +45,7 @@
 static const TransformImplementation<float> transforms_fp32[] = {
 #if defined(__aarch64__)
 #endif  // defined(__aarch64__)
-  { IMPL(4, 4, 3, 3, arm_fp32_4x4_3x3, Unpadded) },
+  { IMPL(4, 4, 3, 3, arm_fp32_4x4_3x3, Unpadded), MethodConstraints::LargerShape },
   { IMPL(2, 2, 3, 3, arm_fp32_2x2_3x3, Unpadded) },
   { IMPL(2, 2, 5, 5, arm_fp32_2x2_5x5, Unpadded) },
   { IMPL(1, 6, 1, 3, arm_fp32_1x6_1x3, Unpadded) },
diff --git a/src/core/NEON/kernels/convolution/winograd/winograd_implementations.hpp b/src/core/NEON/kernels/convolution/winograd/winograd_implementations.hpp
index a23cb1d..510f69b 100644
--- a/src/core/NEON/kernels/convolution/winograd/winograd_implementations.hpp
+++ b/src/core/NEON/kernels/convolution/winograd/winograd_implementations.hpp
@@ -38,6 +38,7 @@
   RequiresSVE2 = 0x2,
   RequiresSME  = 0x4,
   RequiresSME2 = 0x8,
+  LargerShape  = 0x10, // Input tensor shape is larger than the output transform tile shape.
 };
 
 constexpr inline bool operator!(const MethodConstraints &c)
@@ -66,6 +67,14 @@
   );
 }
 
+inline bool output_transform_constraints_met(const output_transform::ITransform *transform, const MethodConstraints &c, const CPUInfo *ci, const ConvolutionArgs &conv_args, const WinogradConfig *cfg)
+{
+  return (
+    constraints_met(c, ci, conv_args, cfg) &&
+    (!(c & MethodConstraints::LargerShape) || (conv_args.input_shape.rows > transform->get_output_rows() && conv_args.input_shape.cols > transform->get_output_cols()))
+  );
+}
+
 namespace weight_transform {
 
 template <typename TIn, typename TOut=TIn>
@@ -209,7 +218,7 @@
        impl->transform.get() != nullptr; impl++)
   {
     if(
-      constraints_met(impl->constraints, ci, conv_args,  cfg) &&
+      output_transform_constraints_met(impl->transform.get(), impl->constraints, ci, conv_args,  cfg) &&
       impl->transform->get_kernel_rows() == conv_args.kernel_shape.rows &&
       impl->transform->get_kernel_cols() == conv_args.kernel_shape.cols &&
       (cfg->output_rows == 0 || cfg->output_rows == impl->transform->get_output_rows()) &&
diff --git a/src/cpu/operators/CpuWinogradConv2d.cpp b/src/cpu/operators/CpuWinogradConv2d.cpp
index 7be2d6d..81cf651 100644
--- a/src/cpu/operators/CpuWinogradConv2d.cpp
+++ b/src/cpu/operators/CpuWinogradConv2d.cpp
@@ -252,9 +252,15 @@
             _permute_output->configure(&_output_nhwc, dst, PermutationVector(1U, 2U, 0U));
         }
 
+        // Configure input transform kernel
+        _transform_input_kernel = std::make_unique<CpuWinogradConv2dTransformInputKernel>(_winograd_impl, *_conv_args, nthreads);
+
         // Configure GEMM function
         _gemm_function->configure(&_winograd_transformed_input, &_winograd_transformed_weights, nullptr, &_winograd_transformed_output, 1.0f, 0.f);
 
+        // Configure output transform kernel
+        _transform_output_kernel = std::make_unique<CpuWinogradConv2dTransformOutputKernel>(_winograd_impl, *_conv_args, nthreads);
+
         //Configure Activation Layer
         _run_activation = act_info.enabled() && !fuse_function_supported(act_info);
         if(_run_activation)
@@ -331,8 +337,6 @@
     CpuAuxTensorHandler output_nhwc(offset_int_vec(PermutedOutput), _output_nhwc, tensors, true);
 
     ITensorPack transform_input_pack{ { ACL_SRC, is_nchw ? input_nhwc.get() : src }, { ACL_DST, winograd_input_transformed.get() }, { ACL_INT, input_workspace.get() } };
-    _transform_input_kernel = std::make_unique<CpuWinogradConv2dTransformInputKernel>(_winograd_impl, *_conv_args, nthreads);
-
     NEScheduler::get().schedule_op(_transform_input_kernel.get(), Window::DimX, win, transform_input_pack);
 
     CpuAuxTensorHandler winograd_weights_transformed(offset_int_vec(TransformedWeights), _winograd_transformed_weights, tensors, true);
@@ -346,7 +350,6 @@
     _gemm_function->run(gemm_pack);
 
     // Output transform
-    _transform_output_kernel = std::make_unique<CpuWinogradConv2dTransformOutputKernel>(_winograd_impl, *_conv_args, nthreads);
     ITensorPack transform_output_pack{ { ACL_SRC_0, winograd_output_transformed.get() }, { ACL_DST, is_nchw ? output_nhwc.get() : output }, { ACL_SRC_1, biases }, { ACL_INT, output_workspace.get() } };
     NEScheduler::get().schedule_op(_transform_output_kernel.get(), Window::DimX, win, transform_output_pack);
     if(is_nchw)