COMPMID-1959: Implements 2D FFT on OpenCL

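The test changes below add CL validation coverage for the new CLFFT2D
function and the FFT-based CLFFTConvolutionLayer, alongside the existing
FFT1D tests.

A minimal usage sketch of the 2D FFT function, mirroring the
configuration used in the added tests (complex F32 tensors with
2 channels, default FFT2DInfo). Everything outside the configure()
call -- main(), CLScheduler::default_init(), the exact include set --
is illustrative boilerplate and not part of this patch:

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLFFT2D.h" // also pulls in FFT2DInfo

    using namespace arm_compute;

    int main()
    {
        // Illustrative boilerplate: initialise the default OpenCL device/context.
        CLScheduler::get().default_init();

        // Complex tensors: 2 channels (real, imaginary) of F32, as in the tests.
        CLTensor src, dst;
        src.allocator()->init(TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32));
        dst.allocator()->init(TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32));

        // Forward 2D FFT with the default FFT2DInfo, as configured in the tests.
        CLFFT2D fft2d;
        fft2d.configure(&src, &dst, FFT2DInfo());

        src.allocator()->allocate();
        dst.allocator()->allocate();

        // ... fill src with interleaved real/imaginary data, then:
        fft2d.run();
        return 0;
    }
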
Change-Id: I73cf3984a5463acc854c8a59dc2bd9a5234cd99c
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/936
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
diff --git a/tests/validation/CL/ConvolutionLayer.cpp b/tests/validation/CL/ConvolutionLayer.cpp
index 41d2b7b..f1f9b59 100644
--- a/tests/validation/CL/ConvolutionLayer.cpp
+++ b/tests/validation/CL/ConvolutionLayer.cpp
@@ -46,7 +46,7 @@
 namespace
 {
 constexpr AbsoluteTolerance<float>  absolute_tolerance_float(0.0001f);    /**< Absolute Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
-RelativeTolerance<float>            tolerance_f32(0.05f);                 /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
+RelativeTolerance<float>            tolerance_f32(0.1f);                  /**< Tolerance value for comparing reference's output against implementation's output for DataType::F32 */
 RelativeTolerance<half_float::half> tolerance_f16(half_float::half(0.2)); /**< Tolerance value for comparing reference's output against implementation's output for DataType::F16 */
 constexpr AbsoluteTolerance<float>  tolerance_qasymm8(1);                 /**< Tolerance value for comparing reference's output against implementation's output for quantized data types */
 constexpr float                     tolerance_num = 0.07f;                /**< Tolerance number */
diff --git a/tests/validation/CL/FFT.cpp b/tests/validation/CL/FFT.cpp
index 0d29532..9fdd85b 100644
--- a/tests/validation/CL/FFT.cpp
+++ b/tests/validation/CL/FFT.cpp
@@ -24,7 +24,10 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/functions/CLFFT1D.h"
+#include "arm_compute/runtime/CL/functions/CLFFT2D.h"
+#include "arm_compute/runtime/CL/functions/CLFFTConvolutionLayer.h"
 #include "tests/CL/CLAccessor.h"
+#include "tests/datasets/SmallConvolutionLayerDataset.h"
 #include "tests/framework/Asserts.h"
 #include "tests/framework/Macros.h"
 #include "tests/framework/datasets/Datasets.h"
@@ -40,7 +43,7 @@
 namespace
 {
 const auto data_types = framework::dataset::make("DataType", { DataType::F32 });
-const auto shapes     = framework::dataset::make("TensorShape", { TensorShape(2U, 2U, 3U), TensorShape(3U, 2U, 3U),
+const auto shapes_1d  = framework::dataset::make("TensorShape", { TensorShape(2U, 2U, 3U), TensorShape(3U, 2U, 3U),
                                                                   TensorShape(4U, 2U, 3U), TensorShape(5U, 2U, 3U),
                                                                   TensorShape(7U, 2U, 3U), TensorShape(8U, 2U, 3U),
                                                                   TensorShape(9U, 2U, 3U), TensorShape(25U, 2U, 3U),
@@ -48,11 +51,27 @@
                                                                   TensorShape(16U, 2U, 3U), TensorShape(32U, 2U, 3U),
                                                                   TensorShape(96U, 2U, 2U)
                                                                 });
+const auto shapes_2d = framework::dataset::make("TensorShape", { TensorShape(2U, 2U, 3U), TensorShape(3U, 6U, 3U),
+                                                                 TensorShape(4U, 5U, 3U), TensorShape(5U, 7U, 3U),
+                                                                 TensorShape(7U, 25U, 3U), TensorShape(8U, 2U, 3U),
+                                                                 TensorShape(9U, 16U, 3U), TensorShape(25U, 32U, 3U),
+                                                                 TensorShape(192U, 128U, 2U)
+                                                               });
+
+const auto ActivationFunctionsSmallDataset = framework::dataset::make("ActivationInfo",
+{
+    ActivationLayerInfo(),
+    ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, 0.5f)
+});
+
+RelativeTolerance<float> tolerance_f32(0.1f);   /**< Relative tolerance value for FP32 */
+constexpr float          tolerance_num = 0.07f; /**< Tolerance number */
+
 } // namespace
 TEST_SUITE(CL)
 TEST_SUITE(FFT1D)
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes, data_types),
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes_1d, data_types),
                shape, data_type)
 {
     // Create tensors
@@ -81,19 +100,19 @@
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
         framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Mismatching data types
                                                 TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Mismatching shapes
-                                                TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32), // Invalid channels
+                                                TensorInfo(TensorShape(32U, 13U, 2U), 3, DataType::F32), // Invalid channels
                                                 TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Unsupported axis
                                                 TensorInfo(TensorShape(11U, 13U, 2U), 2, DataType::F32), // Undecomposable FFT
                                                 TensorInfo(TensorShape(25U, 13U, 2U), 2, DataType::F32),
         }),
         framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F16),
                                                 TensorInfo(TensorShape(16U, 13U, 2U), 2, DataType::F32),
-                                                TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32),
                                                 TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32),
                                                 TensorInfo(TensorShape(11U, 13U, 2U), 2, DataType::F32),
                                                 TensorInfo(TensorShape(25U, 13U, 2U), 2, DataType::F32),
         })),
-        framework::dataset::make("Axis", { 0, 0, 0, 1, 0, 0 })),
+        framework::dataset::make("Axis", { 0, 0, 0, 2, 0, 0 })),
         framework::dataset::make("Expected", { false, false, false, false, false, true })),
         input_info, output_info, axis, expected)
 {
@@ -106,19 +125,103 @@
 // *INDENT-ON*
 
 template <typename T>
-using CLFFT1DFixture = FFTValidationFixture<CLTensor, CLAccessor, CLFFT1D, T>;
+using CLFFT1DFixture = FFTValidationFixture<CLTensor, CLAccessor, CLFFT1D, FFT1DInfo, T>;
 
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT1DFixture<float>, framework::DatasetMode::ALL, combine(shapes, framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT1DFixture<float>, framework::DatasetMode::ALL, combine(shapes_1d, framework::dataset::make("DataType", DataType::F32)))
 {
     // Validate output
-    validate(CLAccessor(_target), _reference, RelativeTolerance<float>(0.1f), 0.05f);
+    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
 }
 TEST_SUITE_END() // FP32
 TEST_SUITE_END() // Float
 
 TEST_SUITE_END() // FFT1D
+
+TEST_SUITE(FFT2D)
+
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(shapes_2d, data_types),
+               shape, data_type)
+{
+    // Create tensors
+    CLTensor src = create_tensor<CLTensor>(shape, data_type, 2);
+    CLTensor dst = create_tensor<CLTensor>(shape, data_type, 2);
+
+    ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
+    ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
+
+    // Create and configure function
+    CLFFT2D fft2d;
+    fft2d.configure(&src, &dst, FFT2DInfo());
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(src.info()->valid_region(), valid_region);
+    validate(dst.info()->valid_region(), valid_region);
+
+    // Validate padding
+    validate(src.info()->padding(), PaddingSize());
+    validate(dst.info()->padding(), PaddingSize());
+}
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
+        framework::dataset::make("InputInfo", { TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32), // Mismatching data types
+                                                TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32), // Mismatching shapes
+                                                TensorInfo(TensorShape(32U, 25U, 2U), 3, DataType::F32), // Invalid channels
+                                                TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32), // Undecomposable FFT
+                                                TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32),
+        }),
+        framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F16),
+                                                TensorInfo(TensorShape(16U, 25U, 2U), 2, DataType::F32),
+                                                TensorInfo(TensorShape(32U, 25U, 2U), 1, DataType::F32),
+                                                TensorInfo(TensorShape(32U, 13U, 2U), 2, DataType::F32),
+                                                TensorInfo(TensorShape(32U, 25U, 2U), 2, DataType::F32),
+        })),
+        framework::dataset::make("Expected", { false, false, false, false, true })),
+               input_info, output_info, expected)
+{
+    const Status s = CLFFT2D::validate(&input_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false), FFT2DInfo());
+    ARM_COMPUTE_EXPECT(bool(s) == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+template <typename T>
+using CLFFT2DFixture = FFTValidationFixture<CLTensor, CLAccessor, CLFFT2D, FFT2DInfo, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLFFT2DFixture<float>, framework::DatasetMode::ALL, combine(shapes_2d, framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+TEST_SUITE_END() // FFT2D
+
+TEST_SUITE(FFTConvolutionLayer)
+
+template <typename T>
+using CLFFTConvolutionLayerFixture = FFTConvolutionValidationFixture<CLTensor, CLAccessor, CLFFTConvolutionLayer, T>;
+
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLFFTConvolutionLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::SmallFFTConvolutionLayerDataset(),
+                                                                                                                 framework::dataset::make("DataType", DataType::F32)),
+                                                                                                                 framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                                                                                                                 ActivationFunctionsSmallDataset))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32, tolerance_num);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE_END() // Float
+TEST_SUITE_END() // FFTConvolutionLayer
+
 TEST_SUITE_END() // CL
 } // namespace validation
 } // namespace test
diff --git a/tests/validation/CL/ReductionOperation.cpp b/tests/validation/CL/ReductionOperation.cpp
index c8474e9..79308c8 100644
--- a/tests/validation/CL/ReductionOperation.cpp
+++ b/tests/validation/CL/ReductionOperation.cpp
@@ -63,7 +63,7 @@
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
     framework::dataset::make("InputInfo",          { TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Mismatching data type input/output
-                                                     TensorInfo(TensorShape(128U, 64U), 2, DataType::F32), // Number of Input channels != 1
+                                                     TensorInfo(TensorShape(128U, 64U), 3, DataType::F32), // Number of Input channels != 1
                                                      TensorInfo(TensorShape(128U, 64U), 1, DataType::S16), // DataType != QASYMM8/F16/F32
                                                      TensorInfo(TensorShape(128U, 64U), 1, DataType::F32), // Axis >= num_max_dimensions
                                                      TensorInfo(TensorShape(128U, 64U), 1, DataType::QASYMM8), // Axis == 0 and SUM_SQUARE and QASYMM8