COMPMID-1717: CL: Implement Maximum, Minimum, SquaredDifference
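
This change also reworks the CL ArithmeticDivision test to use the shared
CLElementwiseOperations.h header, the ElementwiseOperationsFixture.h fixtures
and per-data-type input datasets.

A minimal usage sketch for the new functions (illustration only, not part of
this patch; the class name CLElementwiseMax and its configure() signature are
assumptions to be checked against CLElementwiseOperations.h; Minimum and
SquaredDifference are expected to follow the same pattern):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"

    using namespace arm_compute;

    int main()
    {
        // Initialise the default CL context/queue before configuring CL functions
        CLScheduler::get().default_init();

        // Two F32 inputs and one output of the same shape
        CLTensor a, b, out;
        const TensorInfo info(TensorShape(32U, 13U, 2U), 1, DataType::F32);
        a.allocator()->init(info);
        b.allocator()->init(info);
        out.allocator()->init(info);

        // Element-wise maximum: out[i] = max(a[i], b[i])   (assumed class name)
        CLElementwiseMax max_op;
        max_op.configure(&a, &b, &out);

        // Allocate the backing CL buffers, fill a and b, then execute
        a.allocator()->allocate();
        b.allocator()->allocate();
        out.allocator()->allocate();
        max_op.run();
        CLScheduler::get().sync();
        return 0;
    }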

Change-Id: Ice653e48211053bd3cd20a693bd76de6b4efc370
Reviewed-on: https://review.mlplatform.org/270
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/tests/validation/CL/ArithmeticDivision.cpp b/tests/validation/CL/ArithmeticDivision.cpp
index 5d4fa1f..87039d7 100644
--- a/tests/validation/CL/ArithmeticDivision.cpp
+++ b/tests/validation/CL/ArithmeticDivision.cpp
@@ -24,7 +24,7 @@
 #include "arm_compute/core/Types.h"
 #include "arm_compute/runtime/CL/CLTensor.h"
 #include "arm_compute/runtime/CL/CLTensorAllocator.h"
-#include "arm_compute/runtime/CL/functions/CLArithmeticDivision.h"
+#include "arm_compute/runtime/CL/functions/CLElementwiseOperations.h"
 #include "tests/CL/CLAccessor.h"
 #include "tests/PaddingCalculator.h"
 #include "tests/datasets/ConvertPolicyDataset.h"
@@ -33,7 +33,7 @@
 #include "tests/framework/Macros.h"
 #include "tests/framework/datasets/Datasets.h"
 #include "tests/validation/Validation.h"
-#include "tests/validation/fixtures/ArithmeticDivisionFixture.h"
+#include "tests/validation/fixtures/ElementwiseOperationsFixture.h"
 
 namespace arm_compute
 {
@@ -45,6 +45,20 @@
 {
 RelativeTolerance<float> tolerance_fp32(0.000001f);
 RelativeTolerance<float> tolerance_fp16(0.001f);
+
+constexpr unsigned int num_elems_processed_per_iteration = 16;
+/** Input data sets **/
+const auto ArithmeticDivisionU8Dataset = combine(combine(framework::dataset::make("DataType", DataType::U8), framework::dataset::make("DataType", DataType::U8)),
+                                                 framework::dataset::make("DataType", DataType::U8));
+const auto ArithmeticDivisionQASYMM8Dataset = combine(combine(framework::dataset::make("DataType", DataType::QASYMM8),
+                                                              framework::dataset::make("DataType", DataType::QASYMM8)),
+                                                      framework::dataset::make("DataType", DataType::QASYMM8));
+const auto ArithmeticDivisionS16Dataset = combine(combine(framework::dataset::make("DataType", { DataType::U8, DataType::S16 }), framework::dataset::make("DataType", DataType::S16)),
+                                                  framework::dataset::make("DataType", DataType::S16));
+const auto ArithmeticDivisionFP16Dataset = combine(combine(framework::dataset::make("DataType", DataType::F16), framework::dataset::make("DataType", DataType::F16)),
+                                                   framework::dataset::make("DataType", DataType::F16));
+const auto ArithmeticDivisionFP32Dataset = combine(combine(framework::dataset::make("DataType", DataType::F32), framework::dataset::make("DataType", DataType::F32)),
+                                                   framework::dataset::make("DataType", DataType::F32));
 } // namespace
 
 TEST_SUITE(CL)
@@ -53,25 +67,25 @@
 // *INDENT-OFF*
 // clang-format off
 DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(
-               framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),      // Wrong data type
+               framework::dataset::make("Input1Info", { TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                         TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),      // Window shrink
                                                         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),      // Invalid data type combination
                                                         TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),     // Mismatching shapes
-                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                       }),
                framework::dataset::make("Input2Info",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
                                                        TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                      })),
                framework::dataset::make("OutputInfo",{ TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::S16),
+                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(27U, 13U, 2U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::U8),
                                                        TensorInfo(TensorShape(48U, 11U, 2U), 1, DataType::F32),
-                                                       TensorInfo(TensorShape(32U, 13U, 2U), 1, DataType::F32),
                                                      })),
-               framework::dataset::make("Expected", { false, false, false, false, true })),
+               framework::dataset::make("Expected", { true, true, false, false, false})),
                input1_info, input2_info, output_info, expected)
 {
     ARM_COMPUTE_EXPECT(bool(CLArithmeticDivision::validate(&input1_info.clone()->set_is_resizable(false), &input2_info.clone()->set_is_resizable(false), &output_info.clone()->set_is_resizable(false))) == expected, framework::LogLevel::ERRORS);
@@ -82,17 +96,128 @@
 template <typename T>
 using CLArithmeticDivisionFixture = ArithmeticDivisionValidationFixture<CLTensor, CLAccessor, CLArithmeticDivision, T>;
 
-TEST_SUITE(Float)
-TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticDivisionFixture<half>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F16)))
+TEST_SUITE(U8)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()),
+               shape)
+{
+    // Create tensors
+    CLTensor ref_src1 = create_tensor<CLTensor>(shape, DataType::U8);
+    CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::U8);
+    CLTensor dst      = create_tensor<CLTensor>(shape, DataType::U8);
+
+    // Create and Configure function
+    CLArithmeticDivision div;
+    div.configure(&ref_src1, &ref_src2, &dst);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(dst.info()->valid_region(), valid_region);
+
+    // Validate padding
+    const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
+    validate(ref_src1.info()->padding(), padding);
+    validate(ref_src2.info()->padding(), padding);
+    validate(dst.info()->padding(), padding);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticDivisionFixture<uint8_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), ArithmeticDivisionU8Dataset))
 {
     // Validate output
-    validate(CLAccessor(_target), _reference, tolerance_fp16);
+    validate(CLAccessor(_target), _reference);
 }
-TEST_SUITE_END() // FP16
+TEST_SUITE_END() // U8
+
+template <typename T>
+using CLArithmeticDivisionQuantizedFixture = ArithmeticDivisionValidationQuantizedFixture<CLTensor, CLAccessor, CLArithmeticDivision, T>;
+
+TEST_SUITE(Quantized)
+TEST_SUITE(QASYMM8)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()),
+               shape)
+{
+    // Create tensors
+    CLTensor ref_src1 = create_tensor<CLTensor>(shape, DataType::QASYMM8);
+    CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::QASYMM8);
+    CLTensor dst      = create_tensor<CLTensor>(shape, DataType::QASYMM8);
+
+    // Create and Configure function
+    CLArithmeticDivision div;
+    div.configure(&ref_src1, &ref_src2, &dst);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(dst.info()->valid_region(), valid_region);
+
+    // Validate padding
+    const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
+    validate(ref_src1.info()->padding(), padding);
+    validate(ref_src2.info()->padding(), padding);
+    validate(dst.info()->padding(), padding);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticDivisionQuantizedFixture<uint8_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(combine(datasets::SmallShapes(),
+                                                       ArithmeticDivisionQASYMM8Dataset),
+                                               framework::dataset::make("QuantizationInfo", { QuantizationInfo(5.f / 255.f, 20) })),
+                                       framework::dataset::make("QuantizationInfo", { QuantizationInfo(2.f / 255.f, 10) })),
+                               framework::dataset::make("QuantizationInfo", { QuantizationInfo(1.f / 255.f, 5) }))
+                      )
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_fp32, 0.01);
+}
+TEST_SUITE_END() // QASYMM8
+TEST_SUITE_END() // Quantized
+
+TEST_SUITE(S16)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()), framework::dataset::make("DataType", { DataType::U8, DataType::S16 })),
+               shape, data_type)
+{
+    // Create tensors
+    CLTensor ref_src1 = create_tensor<CLTensor>(shape, data_type);
+    CLTensor ref_src2 = create_tensor<CLTensor>(shape, DataType::S16);
+    CLTensor dst      = create_tensor<CLTensor>(shape, DataType::S16);
+
+    // Create and Configure function
+    CLArithmeticDivision div;
+    div.configure(&ref_src1, &ref_src2, &dst);
+
+    // Validate valid region
+    const ValidRegion valid_region = shape_to_valid_region(shape);
+    validate(dst.info()->valid_region(), valid_region);
+
+    // Validate padding
+    const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
+    validate(ref_src1.info()->padding(), padding);
+    validate(ref_src2.info()->padding(), padding);
+    validate(dst.info()->padding(), padding);
+}
+
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticDivisionFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), ArithmeticDivisionS16Dataset))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticDivisionFixture<int16_t>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), ArithmeticDivisionS16Dataset))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference);
+}
+TEST_SUITE_END() // S16
+
+TEST_SUITE(Float)
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticDivisionFixture<half>, framework::DatasetMode::ALL, combine(datasets::SmallShapes(), ArithmeticDivisionFP16Dataset))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_fp16, 0.01);
+}
+TEST_SUITE_END() // FP16
 
 TEST_SUITE(FP32)
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, concat(datasets::SmallShapes(), datasets::LargeShapes()), shape)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, framework::dataset::concat(datasets::SmallShapes(), datasets::LargeShapes()),
+               shape)
 {
     // Create tensors
     CLTensor ref_src1 = create_tensor<CLTensor>(shape, DataType::F32);
@@ -100,27 +225,27 @@
     CLTensor dst      = create_tensor<CLTensor>(shape, DataType::F32);
 
     // Create and Configure function
     CLArithmeticDivision div;
     div.configure(&ref_src1, &ref_src2, &dst);
 
     // Validate valid region
     const ValidRegion valid_region = shape_to_valid_region(shape);
     validate(dst.info()->valid_region(), valid_region);
 
     // Validate padding
-    const PaddingSize padding = PaddingCalculator(shape.x(), 16).required_padding();
+    const PaddingSize padding = PaddingCalculator(shape.x(), num_elems_processed_per_iteration).required_padding();
     validate(ref_src1.info()->padding(), padding);
     validate(ref_src2.info()->padding(), padding);
     validate(dst.info()->padding(), padding);
 }
 
-FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticDivisionFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunSmall, CLArithmeticDivisionFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapes(), ArithmeticDivisionFP32Dataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_fp32);
 }
 
-FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticDivisionFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), framework::dataset::make("DataType", DataType::F32)))
+FIXTURE_DATA_TEST_CASE(RunLarge, CLArithmeticDivisionFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapes(), ArithmeticDivisionFP32Dataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_fp32);
@@ -130,23 +255,23 @@
 using CLArithmeticDivisionBroadcastFixture = ArithmeticDivisionBroadcastValidationFixture<CLTensor, CLAccessor, CLArithmeticDivision, T>;
 
 FIXTURE_DATA_TEST_CASE(RunSmallBroadcast, CLArithmeticDivisionBroadcastFixture<float>, framework::DatasetMode::PRECOMMIT, combine(datasets::SmallShapesBroadcast(),
-                       framework::dataset::make("DataType", DataType::F32)))
+                       ArithmeticDivisionFP32Dataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_fp32);
 }
 
 FIXTURE_DATA_TEST_CASE(RunLargeBroadcast, CLArithmeticDivisionBroadcastFixture<float>, framework::DatasetMode::NIGHTLY, combine(datasets::LargeShapesBroadcast(),
-                       framework::dataset::make("DataType", DataType::F32)))
+                       ArithmeticDivisionFP32Dataset))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_fp32);
 }
 TEST_SUITE_END() // FP32
 TEST_SUITE_END() // Float
 
 TEST_SUITE_END() // ArithmeticDivision
 TEST_SUITE_END() // CL
 } // namespace validation
 } // namespace test
 } // namespace arm_compute