IVGCVSW-8276 GpuFsa Op: Add MatMul

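* Adds end-to-end, layer-support and optimized-network tests for
  BatchMatMul on the GpuFsa backend.
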
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib95eb0fd71106e684cb7652917b8de9f0ac73f9c
diff --git a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
index a2708c0..7503c46 100644
--- a/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaEndToEndTests.cpp
@@ -5,9 +5,9 @@
 
 #include "backendsCommon/test/EndToEndTestImpl.hpp"
 
+#include "backendsCommon/test/BatchMatMulEndToEndTestImpl.hpp"
 #include "backendsCommon/test/Convolution2dEndToEndTestImpl.hpp"
 #include "backendsCommon/test/layerTests/CastTestImpl.hpp"
-
 #include "backendsCommon/test/DepthwiseConvolution2dEndToEndTests.hpp"
 #include "backendsCommon/test/ElementwiseBinaryEndToEndTestImpl.hpp"
 #include "backendsCommon/test/Pooling2dEndToEndTestImpl.hpp"
@@ -20,6 +20,14 @@
 
 std::vector<BackendId> gpuFsaDefaultBackends = {"GpuFsa"};
 
+// BatchMatMul
+TEST_CASE("RefBatchMatMulEndToEndFloat32Test")
+{
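+    // Builds a small BatchMatMul network, runs it on the GpuFsa backend and checks the output against reference values.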
+    BatchMatMulEndToEnd<armnn::DataType::Float32>(gpuFsaDefaultBackends);
+}
+
+// Cast
 TEST_CASE("GpuFsaCastEndtoEndTestFloat32ToFloat16")
 {
     using namespace half_float::literal;
diff --git a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
index 34af190..b6f7f32 100644
--- a/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaLayerSupportTests.cpp
@@ -17,6 +17,26 @@
 TEST_SUITE("GpuFsaLayerSupport")
 {
 
+TEST_CASE("IsLayerSupportedGpuFsaBatchMatMul")
+{
+    TensorInfo input0Info({ 2, 2 }, DataType::Float32);
+    TensorInfo input1Info({ 2, 2 }, DataType::Float32);
+    TensorInfo outputInfo({ 2, 2 }, DataType::Float32);
+
+    BatchMatMulDescriptor desc{};
+
+    GpuFsaLayerSupport supportChecker;
+    std::string reasonIfNotSupported;
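+    // The two EmptyOptional arguments are the LSTM and QuantizedLstm parameter infos, which do not apply to BatchMatMul.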
+    auto supported = supportChecker.IsLayerSupported(LayerType::BatchMatMul,
+                                                     {input0Info, input1Info, outputInfo},
+                                                     desc,
+                                                     EmptyOptional(),
+                                                     EmptyOptional(),
+                                                     reasonIfNotSupported);
+    CHECK(supported);
+}
+
 TEST_CASE("IsLayerSupportedCast")
 {
     armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
diff --git a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
index 6ddb942..1e5c976 100644
--- a/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
+++ b/src/backends/gpuFsa/test/GpuFsaOptimizedNetworkTests.cpp
@@ -15,6 +15,54 @@
 TEST_SUITE("GpuFsaOptimizedNetwork")
 {
 
+TEST_CASE("BatchMatMulSupportedOptimizedNetwork")
+{
+    using namespace armnn;
+
+    const float qScale = 1.0f;
+    const int32_t qOffset = 0;
+
+    const TensorShape& input1Shape  = { 2, 2 };
+    const TensorShape& input2Shape  = { 2, 2 };
+    const TensorShape& outputShape  = { 2, 2 };
+
+    TensorInfo input1TensorInfo(input1Shape, DataType::Float32, qScale, qOffset, true);
+    TensorInfo input2TensorInfo(input2Shape, DataType::Float32, qScale, qOffset, true);
+    TensorInfo outputTensorInfo(outputShape, DataType::Float32, qScale, qOffset);
+
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+    INetworkPtr network(INetwork::Create());
+
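+    // A default-constructed descriptor requests a plain multiply: no transpose or adjoint is applied to either input.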
+    BatchMatMulDescriptor desc{};
+
+    IConnectableLayer* input0 = network->AddInputLayer(0, "input0");
+    IConnectableLayer* input1 = network->AddInputLayer(1, "input1");
+    IConnectableLayer* batchMatMulLayer = network->AddBatchMatMulLayer(desc, "batchMatMul");
+    IConnectableLayer* output = network->AddOutputLayer(2, "output");
+
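+    // Wire each input layer into one of the BatchMatMul layer's two input slots, then connect the result to the output.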
+    Connect(input0, batchMatMulLayer, input0TensorInfo, 0, 0);
+    Connect(input1, batchMatMulLayer, input1TensorInfo, 0, 1);
+    Connect(batchMatMulLayer, output, outputTensorInfo, 0, 0);
+
+    std::vector<BackendId> backends = { "GpuFsa" };
+
+    OptimizerOptionsOpaque optimizedOptions;
+    IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime->GetDeviceSpec(), optimizedOptions);
+    CHECK(optNet);
+
+    Graph& graph = GetGraphForTesting(optNet.get());
+
+    // Check graph layer sequence to ensure that the network has been replaced with a PreCompiledLayer
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
+                        &IsLayerOfType<InputLayer>,
+                        &IsLayerOfType<InputLayer>,
+                        &IsLayerOfType<PreCompiledLayer>,
+                        &IsLayerOfType<OutputLayer>));
+}
+
 TEST_CASE("CastSupportedOptimizedNetwork")
 {
     using namespace armnn;