IVGCVSW-7307 Add CpuAcc Batch MatMul Workload

* Call dedicated MatMul kernel in ACL
* Add int8 tests
* Add int8 to documentation
* Force tensors to be dynamic (nonConst), as requested by ACL

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I992ae9aae1174214607bf29305f21cdeaf3fdc1b
diff --git a/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
index 74bd97f..504ca1d 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -14,6 +14,7 @@
 #include <armnnUtils/QuantizeHelper.hpp>
 #include <armnnTestUtils/TensorCopyUtils.hpp>
 #include <armnn/Optional.hpp>
+#include <armnn/BackendHelper.hpp>
 
 
 template<armnn::DataType ArmnnType, typename T, std::size_t NumDims>
@@ -29,6 +30,7 @@
     const armnn::TensorInfo& inputYInfo,
     const armnn::TensorInfo& outputInfo)
 {
+    LayerTestResult<T, NumDims> result(outputInfo);
     std::vector<T> outputActual(outputInfo.GetNumElements());
 
     std::unique_ptr<armnn::ITensorHandle> inputXHandle = tensorHandleFactory.CreateTensorHandle(inputXInfo);
@@ -36,13 +38,27 @@
     std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputInfo);
 
     armnn::BatchMatMulQueueDescriptor queueDescriptor;
-    queueDescriptor.m_Parameters = descriptor;
+    queueDescriptor.m_Parameters = std::move(descriptor);
     armnn::WorkloadInfo workloadInfo;
 
     AddInputToWorkload(queueDescriptor, workloadInfo, inputXInfo, inputXHandle.get());
     AddInputToWorkload(queueDescriptor, workloadInfo, inputYInfo, inputYHandle.get());
     AddOutputToWorkload(queueDescriptor, workloadInfo, outputInfo, outputHandle.get());
 
+    // Don't execute if BatchMatMul is not supported, as an exception will be raised.
+    const armnn::BackendId& backend = workloadFactory.GetBackendId();
+    std::string reasonIfUnsupported;
+    armnn::LayerSupportHandle handle = armnn::GetILayerSupportByBackendId(backend);
+    result.m_Supported = handle.IsBatchMatMulSupported(inputXInfo,
+                                                       inputYInfo,
+                                                       outputInfo,
+                                                       queueDescriptor.m_Parameters,
+                                                       reasonIfUnsupported);
+    if (!result.m_Supported)
+    {
+        return result;
+    }
+
     auto workload = workloadFactory.CreateWorkload(armnn::LayerType::BatchMatMul, queueDescriptor, workloadInfo);
 
     inputXHandle->Allocate();