Implement OpenCL MatMul for Lhs NT Rhs T/NT FP32/16

 - Implement ClNativeMatMulKernel class (see the usage sketch after this list)
 - Implement the OpenCL kernel for LHS non-transposed and RHS non-transposed
 - Implement the OpenCL kernel for LHS non-transposed and RHS transposed
 - Add test fixture and datasets for matmul
 - Implement transpose_tensor() in the reference implementation to transpose high-dimensional tensors
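
A minimal usage sketch of the new kernel, mirroring the test fixture added in this
patch (CLSynthetizeOperator is a test-only helper from tests/CL/Helper.h, and the
shapes and M0/N0/K0 block sizes below are illustrative, not recommended values):

    // 13x27 (MxK) LHS and 27x8 (KxN) RHS, both non-transposed, FP32
    CLTensor lhs = create_tensor<CLTensor>(TensorShape(27U, 13U), DataType::F32, 1);
    CLTensor rhs = create_tensor<CLTensor>(TensorShape(8U, 27U), DataType::F32, 1);
    CLTensor dst = create_tensor<CLTensor>(TensorShape(8U, 13U), DataType::F32, 1);

    MatMulKernelInfo matmul_info;
    matmul_info.adj_lhs = false; // LHS is not transposed
    matmul_info.adj_rhs = false; // RHS is not transposed
    matmul_info.m0      = 4;     // output rows computed per work-item
    matmul_info.n0      = 4;     // output columns computed per work-item
    matmul_info.k0      = 4;     // accumulation step along K

    CLSynthetizeOperator<opencl::kernels::ClNativeMatMulKernel> matmul{};
    matmul.configure(lhs.info(), rhs.info(), dst.info(), matmul_info);

    lhs.allocator()->allocate();
    rhs.allocator()->allocate();
    dst.allocator()->allocate();
    // (fill lhs/rhs with data here, as the fixture does)

    ITensorPack pack({ { ACL_SRC_0, &lhs }, { ACL_SRC_1, &rhs }, { ACL_DST, &dst } });
    matmul.run(pack);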

Resolves: COMPMID-5944, COMPMID-5951

Co-authored-by: Gunes Bayir <gunes.bayir@arm.com>
Co-authored-by: Ramy Elgammal <ramy.elgammal@arm.com>
Change-Id: I1d5b8978f41be27baddb3153ade880472141573f
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Signed-off-by: Ramy Elgammal <ramy.elgammal@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9333
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
diff --git a/tests/datasets/BatchMatMulDataset.h b/tests/datasets/BatchMatMulDataset.h
new file mode 100644
index 0000000..dad7cc0
--- /dev/null
+++ b/tests/datasets/BatchMatMulDataset.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef TESTS_DATASETS_BATCHMATMULDATASET
+#define TESTS_DATASETS_BATCHMATMULDATASET
+
+#include "arm_compute/core/TensorShape.h"
+#include "utils/TypePrinter.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class BatchMatMulDataset
+{
+public:
+    using type = std::tuple<TensorShape, TensorShape, TensorShape>;
+
+    struct iterator
+    {
+        iterator(std::vector<TensorShape>::const_iterator a_it,
+                 std::vector<TensorShape>::const_iterator b_it,
+                 std::vector<TensorShape>::const_iterator dst_it)
+            : _a_it{ std::move(a_it) },
+              _b_it{ std::move(b_it) },
+              _dst_it{ std::move(dst_it) }
+        {
+        }
+
+        std::string description() const
+        {
+            std::stringstream description;
+            description << "A=" << *_a_it << ":";
+            description << "B=" << *_b_it << ":";
+            description << "Out=" << *_dst_it << ":";
+            return description.str();
+        }
+
+        BatchMatMulDataset::type operator*() const
+        {
+            return std::make_tuple(*_a_it, *_b_it, *_dst_it);
+        }
+
+        iterator &operator++()
+        {
+            ++_a_it;
+            ++_b_it;
+            ++_dst_it;
+
+            return *this;
+        }
+
+    private:
+        std::vector<TensorShape>::const_iterator _a_it;
+        std::vector<TensorShape>::const_iterator _b_it;
+        std::vector<TensorShape>::const_iterator _dst_it;
+    };
+
+    iterator begin() const
+    {
+        return iterator(_a_shapes.begin(), _b_shapes.begin(), _dst_shapes.begin());
+    }
+
+    int size() const
+    {
+        return std::min(_a_shapes.size(), std::min(_b_shapes.size(), _dst_shapes.size()));
+    }
+
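+    /** Adds a (lhs, rhs, dst) shape configuration, e.g. add_config(TensorShape(9U, 6U), TensorShape(5U, 9U), TensorShape(5U, 6U)) describes a 6x9 * 9x5 = 6x5 matmul (TensorShape is width-first). */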
+    void add_config(TensorShape a, TensorShape b, TensorShape dst)
+    {
+        _a_shapes.emplace_back(std::move(a));
+        _b_shapes.emplace_back(std::move(b));
+        _dst_shapes.emplace_back(std::move(dst));
+    }
+
+protected:
+    BatchMatMulDataset()                      = default;
+    BatchMatMulDataset(BatchMatMulDataset &&) = default;
+
+private:
+    std::vector<TensorShape> _a_shapes{};
+    std::vector<TensorShape> _b_shapes{};
+    std::vector<TensorShape> _dst_shapes{};
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* TESTS_DATASETS_BATCHMATMULDATASET */
diff --git a/tests/datasets/LargeBatchMatMulDataset.h b/tests/datasets/LargeBatchMatMulDataset.h
new file mode 100644
index 0000000..0d8ff91
--- /dev/null
+++ b/tests/datasets/LargeBatchMatMulDataset.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_DATASETS_LARGEBATCHMATMULDATASET
+#define ACL_TESTS_DATASETS_LARGEBATCHMATMULDATASET
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/datasets/BatchMatMulDataset.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class LargeBatchMatMulDataset final : public BatchMatMulDataset
+{
+public:
+    LargeBatchMatMulDataset()
+    {
+        add_config(TensorShape(21U, 13U, 3U, 2U), TensorShape(33U, 21U, 3U, 2U), TensorShape(33U, 13U, 3U, 2U));
+        add_config(TensorShape(38U, 12U, 1U, 5U), TensorShape(21U, 38U, 1U, 5U), TensorShape(21U, 12U, 1U, 5U));
+        add_config(TensorShape(45U, 38U, 3U, 2U), TensorShape(21U, 45U, 3U, 2U), TensorShape(21U, 38U, 3U, 2U));
+    }
+};
+
+class HighDimensionalBatchMatMulDataset final : public BatchMatMulDataset
+{
+public:
+    HighDimensionalBatchMatMulDataset()
+    {
+        add_config(TensorShape(5U, 5U, 2U, 2U, 2U, 2U), TensorShape(5U, 5U, 2U, 2U, 2U, 2U), TensorShape(5U, 5U, 2U, 2U, 2U, 2U)); // 6D tensor
+    }
+};
+
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ACL_TESTS_DATASETS_LARGEBATCHMATMULDATASET */
diff --git a/tests/datasets/SmallBatchMatMulDataset.h b/tests/datasets/SmallBatchMatMulDataset.h
new file mode 100644
index 0000000..cfe76be
--- /dev/null
+++ b/tests/datasets/SmallBatchMatMulDataset.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_DATASETS_SMALLBATCHMATMULDATASET
+#define ACL_TESTS_DATASETS_SMALLBATCHMATMULDATASET
+
+#include "arm_compute/core/TensorShape.h"
+#include "arm_compute/core/Types.h"
+#include "tests/datasets/BatchMatMulDataset.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace datasets
+{
+class SmallBatchMatMulDataset final : public BatchMatMulDataset
+{
+public:
+    SmallBatchMatMulDataset()
+    {
+        add_config(TensorShape(3U, 4U, 2U, 2U), TensorShape(2U, 3U, 2U, 2U), TensorShape(2U, 4U, 2U, 2U));
+        add_config(TensorShape(9U, 6U), TensorShape(5U, 9U), TensorShape(5U, 6U));
+        add_config(TensorShape(31U, 1U), TensorShape(23U, 31U), TensorShape(23U, 1U));
+        add_config(TensorShape(8U, 4U, 2U), TensorShape(16U, 8U, 2U), TensorShape(16U, 4U, 2U));
+        add_config(TensorShape(32U, 2U), TensorShape(17U, 32U), TensorShape(17U, 2U));
+    }
+};
+} // namespace datasets
+} // namespace test
+} // namespace arm_compute
+#endif /* ACL_TESTS_DATASETS_SMALLBATCHMATMULDATASET */
diff --git a/tests/validation/CL/BatchMatMul.cpp b/tests/validation/CL/BatchMatMul.cpp
new file mode 100644
index 0000000..fd84526
--- /dev/null
+++ b/tests/validation/CL/BatchMatMul.cpp
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "src/gpu/cl/kernels/ClNativeMatMulKernel.h"
+#include "tests/datasets/LargeBatchMatMulDataset.h"
+#include "tests/datasets/SmallBatchMatMulDataset.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/BatchMatMulFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+RelativeTolerance<float>            tolerance_f32(0.001f);      /**< Relative tolerance for comparing the reference output against the implementation's output for floating point data types */
+constexpr float                     abs_tolerance_f32(0.0001f); /**< Absolute tolerance for FP32, used when the relative tolerance fails because of small values */
+constexpr float                     abs_tolerance_f16(0.001f);  /**< Absolute tolerance for FP16, used when the relative tolerance fails because of small values */
+RelativeTolerance<half_float::half> tolerance_f16(half(0.01));  /**< Relative tolerance for comparing the reference output against the implementation's output for FP16 data types */
+} // namespace
+
+/** M0 values to test --precommit*/
+const auto m0_values_precommit = framework::dataset::make("M0", { 1, 3 });
+
+/** N0 values to test --precommit*/
+const auto n0_values_precommit = framework::dataset::make("N0", { 2, 4 });
+
+/** K0 values to test --precommit*/
+const auto k0_values_precommit = framework::dataset::make("K0", { 2, 3 });
+
+/** M0 values to test --nightly*/
+const auto m0_values_nightly_lhs_nt = framework::dataset::make("M0", { 1, 2, 3, 4, 5, 6, 7, 8 });
+// const auto m0_values_nightly_lhs_t = framework::dataset::make("M0", { 1, 2, 3, 4, 8 }); // To be enabled
+
+/** N0 values to test --nightly*/
+const auto n0_values_nightly_rhs_nt = framework::dataset::make("N0", { 1, 2, 3, 4, 8, 16 });
+const auto n0_values_nightly_rhs_t  = framework::dataset::make("N0", { 1, 2, 3, 4, 8 });
+
+/** K0 values to test --nightly*/
+const auto k0_values_nightly_lhs_nt_rhs_nt = framework::dataset::make("K0", { 1, 2, 3, 4, 8, 16 });
+const auto k0_values_nightly_lhs_nt_rhs_t  = framework::dataset::make("K0", { 1, 2, 3, 4, 8 });
+// const auto k0_values_nightly_lhs_t_rhs_nt = framework::dataset::make("K0", { 1, 2, 3, 4, 5, 6, 7, 8 }); // To be enabled
+
+template <typename T>
+using CLBatchMatMulFixture = BatchMatMulValidationFixture<T>;
+
+TEST_SUITE(CL)
+TEST_SUITE(BatchMatMul)
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(zip(zip(
+                                                                      framework::dataset::make("LhsInfo",
+{
+    TensorInfo(TensorShape(27U, 13U), 1, DataType::S32), // Unsupported data type
+    TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(27U, 13U), 1, DataType::F32),
+}),
+framework::dataset::make("RhsInfo",
+{
+    TensorInfo(TensorShape(8U, 27U), 1, DataType::S32),
+    TensorInfo(TensorShape(8U, 27U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 27U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 27U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 27U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 27U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 27U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 27U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 27U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 27U), 1, DataType::F32),
+})),
+framework::dataset::make("OutputInfo",
+{
+    TensorInfo(TensorShape(8U, 13U), 1, DataType::S32),
+    TensorInfo(TensorShape(8U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 13U), 1, DataType::F32),
+    TensorInfo(TensorShape(8U, 13U), 1, DataType::F32),
+})),
+framework::dataset::make("MatMulInfo",
+{
+    MatMulKernelInfo(false, false, 2, 2, 2, false), MatMulKernelInfo(false, false, 2, 2, 2, false), MatMulKernelInfo(false, false, 9, 2, 2, false), MatMulKernelInfo(false, false, 0, 2, 2, false), // M0 cannot be < 1
+    MatMulKernelInfo(false, true, 4, 5, 2, false),  // For LHS NT RHS T: N0 cannot be 5
+    MatMulKernelInfo(false, true, 4, 6, 2, false),  // For LHS NT RHS T: N0 cannot be 6
+    MatMulKernelInfo(false, true, 4, 9, 2, false),  // For LHS NT RHS T: N0 cannot be 9
+    MatMulKernelInfo(false, true, 4, 10, 2, false), // For LHS NT RHS T: N0 cannot be 10
+    MatMulKernelInfo(false, true, 4, 11, 2, false), // For LHS NT RHS T: N0 cannot be 11
+    MatMulKernelInfo(false, true, 4, 17, 2, false), // For LHS NT RHS T: N0 cannot be 17
+})),
+framework::dataset::make("Expected", { false, true, true, false, false, false, false, false, false, false })),
+lhs_info, rhs_info, output_info, matmul_info, expected)
+{
+    bool is_valid = bool(ClNativeMatMulKernel::validate(&lhs_info, &rhs_info, &output_info, matmul_info));
+    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+TEST_SUITE(Float)
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmallNoTranspose, CLBatchMatMulFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(datasets::SmallBatchMatMulDataset(),
+                                                        framework::dataset::make("pretranspose_A", { false })),
+                                                        framework::dataset::make("pretranspose_B", { false })),
+                                                        m0_values_precommit),
+                                                        n0_values_precommit),
+                                                        k0_values_precommit),
+                                                        framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLBatchMatMulFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(datasets::SmallBatchMatMulDataset(),
+                                                        framework::dataset::make("pretranspose_A", { false })),
+                                                        framework::dataset::make("pretranspose_B", { true })),
+                                                        m0_values_precommit),
+                                                        n0_values_precommit),
+                                                        k0_values_precommit),
+                                                        framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLBatchMatMulFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(datasets::LargeBatchMatMulDataset(),
+                                                        framework::dataset::make("pretranspose_A", { false })),
+                                                        framework::dataset::make("pretranspose_B", { false })),
+                                                        m0_values_nightly_lhs_nt),
+                                                        n0_values_nightly_rhs_nt),
+                                                        k0_values_nightly_lhs_nt_rhs_nt),
+                                                        framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32);
+}
+// Running the high-dimensional test for FP32 only is enough, because it stresses the number of dimensions rather than the data type or M0/N0/K0.
+// It is still worth covering each Lhs/Rhs T/NT combination, since each combination maps to a different CL kernel.
+FIXTURE_DATA_TEST_CASE(RunHighDimNoTranspose, CLBatchMatMulFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(datasets::HighDimensionalBatchMatMulDataset(),
+                                                        framework::dataset::make("pretranspose_A", { false })),
+                                                        framework::dataset::make("pretranspose_B", { false })),
+                                                        framework::dataset::make("M0", { 2 })),
+                                                        framework::dataset::make("N0", { 2 })),
+                                                        framework::dataset::make("K0", { 2 })),
+                                                        framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLBatchMatMulFixture<float>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(datasets::LargeBatchMatMulDataset(),
+                                                        framework::dataset::make("pretranspose_A", { false })),
+                                                        framework::dataset::make("pretranspose_B", { true })),
+                                                        m0_values_nightly_lhs_nt),
+                                                        n0_values_nightly_rhs_t),
+                                                        k0_values_nightly_lhs_nt_rhs_t),
+                                                        framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32);
+}
+FIXTURE_DATA_TEST_CASE(RunHighDimRhsTransposed, CLBatchMatMulFixture<float>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(datasets::HighDimensionalBatchMatMulDataset(),
+                                                        framework::dataset::make("pretranspose_A", { false })),
+                                                        framework::dataset::make("pretranspose_B", { true })),
+                                                        framework::dataset::make("M0", { 2 })),
+                                                        framework::dataset::make("N0", { 2 })),
+                                                        framework::dataset::make("K0", { 2 })),
+                                                        framework::dataset::make("DataType", DataType::F32)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32, 0.f, abs_tolerance_f32);
+}
+TEST_SUITE_END() // FP32
+
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmallNoTranspose, CLBatchMatMulFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(datasets::SmallBatchMatMulDataset(),
+                                                        framework::dataset::make("pretranspose_A", { false })),
+                                                        framework::dataset::make("pretranspose_B", { false })),
+                                                        m0_values_precommit),
+                                                        n0_values_precommit),
+                                                        k0_values_precommit),
+                                                        framework::dataset::make("DataType", DataType::F16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16);
+}
+FIXTURE_DATA_TEST_CASE(RunSmallRhsTransposed, CLBatchMatMulFixture<half>, framework::DatasetMode::ALL, combine(combine(combine(combine(combine(combine(datasets::SmallBatchMatMulDataset(),
+                                                        framework::dataset::make("pretranspose_A", { false })),
+                                                        framework::dataset::make("pretranspose_B", { true })),
+                                                        m0_values_precommit),
+                                                        n0_values_precommit),
+                                                        k0_values_precommit),
+                                                        framework::dataset::make("DataType", DataType::F16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16);
+}
+FIXTURE_DATA_TEST_CASE(RunLargeNoTranspose, CLBatchMatMulFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(datasets::LargeBatchMatMulDataset(),
+                                                        framework::dataset::make("pretranspose_A", { false })),
+                                                        framework::dataset::make("pretranspose_B", { false })),
+                                                        m0_values_nightly_lhs_nt),
+                                                        n0_values_nightly_rhs_nt),
+                                                        k0_values_nightly_lhs_nt_rhs_nt),
+                                                        framework::dataset::make("DataType", DataType::F16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16);
+}
+FIXTURE_DATA_TEST_CASE(RunLargeRhsTransposed, CLBatchMatMulFixture<half>, framework::DatasetMode::NIGHTLY, combine(combine(combine(combine(combine(combine(datasets::LargeBatchMatMulDataset(),
+                                                        framework::dataset::make("pretranspose_A", { false })),
+                                                        framework::dataset::make("pretranspose_B", { true })),
+                                                        m0_values_nightly_lhs_nt),
+                                                        n0_values_nightly_rhs_t),
+                                                        k0_values_nightly_lhs_nt_rhs_t),
+                                                        framework::dataset::make("DataType", DataType::F16)))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16, 0.f, abs_tolerance_f16);
+}
+TEST_SUITE_END() // FP16
+
+TEST_SUITE_END() // Float
+TEST_SUITE_END() // BatchMatMul
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
diff --git a/tests/validation/fixtures/BatchMatMulFixture.h b/tests/validation/fixtures/BatchMatMulFixture.h
new file mode 100644
index 0000000..9fb2dcc
--- /dev/null
+++ b/tests/validation/fixtures/BatchMatMulFixture.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ACL_TESTS_VALIDATION_FIXTURES_BATCHMATMULFIXTURE
+#define ACL_TESTS_VALIDATION_FIXTURES_BATCHMATMULFIXTURE
+
+#include "arm_compute/core/KernelDescriptors.h"
+#include "src/gpu/cl/kernels/ClNativeMatMulKernel.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/CL/Helper.h"
+#include "tests/framework/Fixture.h"
+#include "tests/validation/reference/GEMM.h"
+#include "tests/validation/reference/Permute.h"
+#include "tests/validation/reference/ReshapeLayer.h"
+
+#include <random>
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+using namespace arm_compute::opencl::kernels;
+
+template <typename T>
+class BatchMatMulValidationFixture : public framework::Fixture
+{
+public:
+    template <typename...>
+    void setup(TensorShape shape_a, TensorShape shape_b, TensorShape output_shape, bool pretranspose_a, bool pretranspose_b, const int M0, const int N0, const int K0, DataType data_type)
+    {
+        // For brevity, the input shapes are assumed to be non-transposed for both Lhs and Rhs matrices.
+        if(pretranspose_a)
+        {
+            permute(shape_a, PermutationVector(1U, 0U));
+        }
+
+        if(pretranspose_b)
+        {
+            permute(shape_b, PermutationVector(1U, 0U));
+        }
+
+        _target    = compute_target(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, M0, N0, K0, data_type);
+        _reference = compute_reference(shape_a, shape_b, output_shape, pretranspose_a, pretranspose_b, data_type);
+    }
+
+protected:
+    template <typename U>
+    void fill(U &&tensor, int i, float lo = -1.f, float hi = 1.f)
+    {
+        switch(tensor.data_type())
+        {
+            case DataType::F16:
+            {
+                arm_compute::utils::uniform_real_distribution_16bit<half> distribution{ float(lo), float(hi) };
+                library->fill(tensor, distribution, i);
+                break;
+            }
+            case DataType::F32:
+            {
+                std::uniform_real_distribution<float> distribution(lo, hi);
+                library->fill(tensor, distribution, i);
+                break;
+            }
+            default:
+                library->fill_tensor_uniform(tensor, i);
+        }
+    }
+
+    CLTensor compute_target(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, const int M0, const int N0, const int K0,
+                            DataType data_type)
+    {
+        // Create tensors
+        CLTensor a   = create_tensor<CLTensor>(shape_a, data_type, 1);
+        CLTensor b   = create_tensor<CLTensor>(shape_b, data_type, 1);
+        CLTensor dst = create_tensor<CLTensor>(output_shape, data_type, 1);
+
+        CLSynthetizeOperator<ClNativeMatMulKernel> batchMatMul{};
+        MatMulKernelInfo                           matmul_info;
+        matmul_info.adj_lhs = pretranspose_a;
+        matmul_info.adj_rhs = pretranspose_b;
+        matmul_info.m0      = M0;
+        matmul_info.n0      = N0;
+        matmul_info.k0      = K0;
+
+        batchMatMul.configure(a.info(), b.info(), dst.info(), matmul_info);
+        ARM_COMPUTE_ASSERT(a.info()->is_resizable());
+        ARM_COMPUTE_ASSERT(b.info()->is_resizable());
+        ARM_COMPUTE_ASSERT(dst.info()->is_resizable());
+
+        // Allocate tensors
+        a.allocator()->allocate();
+        b.allocator()->allocate();
+        dst.allocator()->allocate();
+
+        ARM_COMPUTE_ASSERT(!a.info()->is_resizable());
+        ARM_COMPUTE_ASSERT(!b.info()->is_resizable());
+        ARM_COMPUTE_ASSERT(!dst.info()->is_resizable());
+
+        // Fill tensors
+        fill(CLAccessor(a), 0);
+        fill(CLAccessor(b), 1);
+
+        // Compute batchMatMul kernel
+        ITensorPack tensors_pack({ { ACL_SRC_0, &a },
+            { ACL_SRC_1, &b },
+            { ACL_DST, &dst }
+        });
+        batchMatMul.run(tensors_pack);
+
+        return dst;
+    }
+
+    SimpleTensor<T> compute_reference(const TensorShape &shape_a, const TensorShape &shape_b, const TensorShape &output_shape, bool pretranspose_a, bool pretranspose_b, DataType data_type)
+    {
+        // We collapse dimensions > 3 onto dimension 3, i.e. 5D+ tensors will look like 4D
+        // This is necessary unless we choose to extend gemm reference for 5D+ tensors
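+        // For example, the 6D output shape (5, 5, 2, 2, 2, 2) used in the high-dimensional dataset collapses to (5, 5, 2, 8) here and is reshaped back after the reference gemm.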
+        TensorShape output_shape_collapsed = output_shape.collapsed_from(Window::DimW);
+        TensorShape shape_a_collapsed      = shape_a.collapsed_from(Window::DimW);
+        TensorShape shape_b_collapsed      = shape_b.collapsed_from(Window::DimW);
+
+        // Create reference
+        SimpleTensor<T> a{ shape_a_collapsed, data_type, 1 };
+        SimpleTensor<T> b{ shape_b_collapsed, data_type, 1 };
+        SimpleTensor<T> c{ output_shape_collapsed, data_type, 1 };
+
+        // Fill reference
+        fill(a, 0);
+        fill(b, 1);
+
+        /* Note: Assuming the usual batch matmul dimensions A = (B x M x K) and B = (B x K x N): if pretranspose_A is set to true, then A is assumed to be (B x K x M).
+           A must therefore be pre-transposed before being passed to the fixture, and we transpose it back to (B x M x K) here
+           so that the reference implementation, which expects (B x M x K) input, can be called.
+           Similarly, if pretranspose_B is set to true, then B is assumed to be (B x N x K) and must be pre-transposed before being passed to the fixture. */
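+        // For example, with A = (2 x 4 x 3) = (B x M x K) and pretranspose_A = true, the tensor reaching this function has shape (2 x 3 x 4) and is permuted back to (2 x 4 x 3) before reference::gemm is called.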
+
+        // Define transposed shapes
+        TensorShape a_transposed_shape(a.shape());
+        a_transposed_shape.set(0, a.shape().y());
+        a_transposed_shape.set(1, a.shape().x());
+
+        TensorShape b_transposed_shape(b.shape());
+        b_transposed_shape.set(0, b.shape().y());
+        b_transposed_shape.set(1, b.shape().x());
+
+        // Define transposed tensors
+        SimpleTensor<T> a_transposed{ a_transposed_shape, data_type };
+        SimpleTensor<T> b_transposed{ b_transposed_shape, data_type };
+
+        // pretranspose a if necessary
+        if(pretranspose_a)
+        {
+            a_transposed = reference::permute<T>(a, PermutationVector(1U, 0U));
+        }
+
+        // pretranspose b if necessary
+        if(pretranspose_b)
+        {
+            b_transposed = reference::permute<T>(b, PermutationVector(1U, 0U));
+        }
+
+        // Setting beta to 0 will effectively disable C for the
+        // computation of the reference: alpha * A * B + 0 * C
+        // Use transposed tensors if boolean enabled else use original tensors
+        SimpleTensor<T> result = reference::gemm<T>((pretranspose_a) ? a_transposed : a, (pretranspose_b) ? b_transposed : b, c, 1.0f, 0.f);
+
+        // We reshape the gemm output back if the tensor is high dimensional
+        if(output_shape_collapsed != output_shape)
+        {
+            result = reference::reshape_layer(result, output_shape);
+        }
+
+        return result;
+    }
+
+    CLTensor        _target{};
+    SimpleTensor<T> _reference{};
+};
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+#endif /* ACL_TESTS_VALIDATION_FIXTURES_BATCHMATMULFIXTURE */