IVGCVSW-7109: Add Batch MatMul front end support - Reference

  * Descriptors added for BatchMatMul
  * Layer definition added (see the usage sketch below)
  * Input validation added (will likely change when optional parameter support comes in)
  * Ref workload implementation for BatchMatMul added (will also change with optional parameter support)
  * Ref layer tests added for BatchMatMul
  * CMake and other build files updated
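
  Illustrative usage sketch (not part of the diff below): the helper function,
  layer names and tensor shapes are made-up examples, and the descriptor's
  optional parameters are simply left at their defaults.

      #include <armnn/Descriptors.hpp>
      #include <armnn/INetwork.hpp>
      #include <armnn/Tensor.hpp>

      using namespace armnn;

      INetworkPtr CreateBatchMatMulNetwork()
      {
          INetworkPtr net = INetwork::Create();

          // Optional parameters (transposes/adjoints, data layouts) left at their defaults.
          BatchMatMulDescriptor descriptor;

          IConnectableLayer* inputX      = net->AddInputLayer(0, "inputX");
          IConnectableLayer* inputY      = net->AddInputLayer(1, "inputY");
          IConnectableLayer* batchMatMul = net->AddBatchMatMulLayer(descriptor, "batchMatMul");
          IConnectableLayer* output      = net->AddOutputLayer(0, "output");

          // X and Y feed the two BatchMatMul input slots; the result feeds the output layer.
          inputX->GetOutputSlot(0).Connect(batchMatMul->GetInputSlot(0));
          inputY->GetOutputSlot(0).Connect(batchMatMul->GetInputSlot(1));
          batchMatMul->GetOutputSlot(0).Connect(output->GetInputSlot(0));

          // Example shapes: a batch of two 2x3 by 3x2 multiplications -> [2,2,2].
          inputX->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 2, 3 }, DataType::Float32));
          inputY->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 3, 2 }, DataType::Float32));
          batchMatMul->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 2, 2, 2 }, DataType::Float32));

          return net;
      }

  With these shapes, the layer's InferOutputShapes would also arrive at [2,2,2].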

Signed-off-by: Samuel Yap <samuel.yap@arm.com>
Change-Id: Ic885301da543ee0fbe7922b85e7f9658c4efc617
diff --git a/src/armnn/BackendHelper.cpp b/src/armnn/BackendHelper.cpp
index 5b5bece..6638709 100644
--- a/src/armnn/BackendHelper.cpp
+++ b/src/armnn/BackendHelper.cpp
@@ -179,6 +179,22 @@
                                             reasonIfUnsupported);
 }
 
+bool LayerSupportHandle::IsBatchMatMulSupported(const TensorInfo& input0,
+                                                const TensorInfo& input1,
+                                                const TensorInfo& output,
+                                                const BatchMatMulDescriptor& descriptor,
+                                                Optional<std::string&> reasonIfUnsupported)
+{
+    TensorInfos infos{input0, input1, output};
+
+    return m_LayerSupport->IsLayerSupported(LayerType::BatchMatMul,
+                                            infos,
+                                            descriptor,
+                                            EmptyOptional(),
+                                            EmptyOptional(),
+                                            reasonIfUnsupported);
+}
+
 bool LayerSupportHandle::IsBatchNormalizationSupported(const TensorInfo& input,
                                                        const TensorInfo& output,
                                                        const TensorInfo& mean,
diff --git a/src/armnn/Descriptors.cpp b/src/armnn/Descriptors.cpp
index c740fd0..f957627 100644
--- a/src/armnn/Descriptors.cpp
+++ b/src/armnn/Descriptors.cpp
@@ -455,4 +455,86 @@
     return armnn::GetNumInputs(m_BiasEnabled);
 }
 
+std::pair<std::pair<unsigned int, unsigned int>, std::pair<unsigned int, unsigned int>>
+BatchMatMulDescriptor::GetAxesToMul(
+    const BatchMatMulDescriptor& desc,
+    const TensorShape& tensorXShape,
+    const TensorShape& tensorYShape)
+{
+    // May refactor to just work on one input per call - makes it less confusing and also
+    // allows more flexibility (e.g. in Layer output shape inference)
+
+    auto xNumDims = tensorXShape.GetNumDimensions();
+    auto yNumDims = tensorYShape.GetNumDimensions();
+
+    std::pair<unsigned int, unsigned int> xAxes = { xNumDims-2, xNumDims-1 };
+    std::pair<unsigned int, unsigned int> yAxes = { yNumDims-2, yNumDims-1 };
+
+    if(desc.m_DataLayoutX.has_value())
+    {
+        switch(desc.m_DataLayoutX.value())
+        {
+            case DataLayout::NDHWC:
+            case DataLayout::NHWC:
+                xAxes.first -= 1;
+                xAxes.second -= 1;
+                break;
+            case DataLayout::NCDHW:
+            case DataLayout::NCHW:
+            default:
+                break;
+        }
+    }
+
+    if(desc.m_DataLayoutY.has_value())
+    {
+        switch(desc.m_DataLayoutY.value())
+        {
+            case DataLayout::NDHWC:
+            case DataLayout::NHWC:
+                yAxes.first -= 1;
+                yAxes.second -= 1;
+                break;
+            case DataLayout::NCDHW:
+            case DataLayout::NCHW:
+            default:
+                break;
+        }
+    }
+
+    return { xAxes, yAxes};
+}
+
+std::pair<std::vector<unsigned int>, std::vector<unsigned int>> BatchMatMulDescriptor::GetAxesNotMul(
+    const BatchMatMulDescriptor& desc,
+    const TensorShape& inputXShape,
+    const TensorShape& inputYShape)
+{
+    // May refactor to just work on one input per call - makes it less confusing and also
+    // allows more flexibility (e.g. in Layer output shape inference)
+    auto axesToMul = BatchMatMulDescriptor::GetAxesToMul(desc, inputXShape, inputYShape);
+
+    std::vector<unsigned int> axesXNotMul;
+    std::vector<unsigned int> axesYNotMul;
+
+    for(unsigned int i = 0; i < inputXShape.GetNumDimensions(); i++)
+    {
+        if(i == axesToMul.first.first || i == axesToMul.first.second)
+        {
+            continue;
+        }
+        axesXNotMul.push_back(i);
+    }
+    for(unsigned int i = 0; i < inputYShape.GetNumDimensions(); i++)
+    {
+        if(i == axesToMul.second.first || i == axesToMul.second.second)
+        {
+            continue;
+        }
+        axesYNotMul.push_back(i);
+    }
+
+    return { axesXNotMul, axesYNotMul };
+}
+
 }
diff --git a/src/armnn/ILayerSupport.cpp b/src/armnn/ILayerSupport.cpp
index 5366b13..8099782 100644
--- a/src/armnn/ILayerSupport.cpp
+++ b/src/armnn/ILayerSupport.cpp
@@ -13,7 +13,7 @@
 {
 
 ARMNN_NO_DEPRECATE_WARN_BEGIN
-// IsLayerSupport() forwards to the deprecated virtual methods depending on input LayerType.
+// IsLayerSupported() forwards to the deprecated virtual methods depending on input LayerType.
 // Allows backends continue to behave as before maintaining backward compatibility.
 bool ILayerSupport::IsLayerSupported(const LayerType& type,
                                      const std::vector<TensorInfo>& infos,
diff --git a/src/armnn/LayersFwd.hpp b/src/armnn/LayersFwd.hpp
index dcfb91b..acac1f9 100644
--- a/src/armnn/LayersFwd.hpp
+++ b/src/armnn/LayersFwd.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -9,6 +9,7 @@
 #include "layers/ActivationLayer.hpp"
 #include "layers/AdditionLayer.hpp"
 #include "layers/ArgMinMaxLayer.hpp"
+#include "layers/BatchMatMulLayer.hpp"
 #include "layers/BatchNormalizationLayer.hpp"
 #include "layers/BatchToSpaceNdLayer.hpp"
 #include "layers/CastLayer.hpp"
@@ -110,6 +111,7 @@
 DECLARE_LAYER(Activation)
 DECLARE_LAYER(Addition)
 DECLARE_LAYER(ArgMinMax)
+DECLARE_LAYER(BatchMatMul)
 DECLARE_LAYER(BatchNormalization)
 DECLARE_LAYER(BatchToSpaceNd)
 DECLARE_LAYER(Cast)
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 5d44306..ef9f4e7 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -456,6 +456,12 @@
     return pNetworkImpl->AddChannelShuffleLayer(descriptor, name);
 }
 
+IConnectableLayer* INetwork::AddBatchMatMulLayer(const BatchMatMulDescriptor &descriptor,
+                                                 const char* name)
+{
+    return pNetworkImpl->AddBatchMatMulLayer(descriptor, name);
+}
+
 void INetwork::ExecuteStrategy(IStrategy& strategy) const
 {
     return pNetworkImpl->ExecuteStrategy(strategy);
@@ -2876,6 +2882,11 @@
     return layer;
 }
 
+IConnectableLayer* NetworkImpl::AddBatchMatMulLayer(const BatchMatMulDescriptor& desc, const char* name)
+{
+    return m_Graph->AddLayer<BatchMatMulLayer>(desc, name);
+}
+
 IConnectableLayer* NetworkImpl::AddPrecompiledLayer(const PreCompiledDescriptor& preCompiledDescriptor,
                                                     CompiledBlobPtr compiledBlobPtr,
                                                     const Optional<BackendId>& backend,
diff --git a/src/armnn/Network.hpp b/src/armnn/Network.hpp
index a4387e6..19a0286 100644
--- a/src/armnn/Network.hpp
+++ b/src/armnn/Network.hpp
@@ -49,6 +49,9 @@
     IConnectableLayer* AddArgMinMaxLayer(const ArgMinMaxDescriptor& desc,
                                          const char* name = nullptr);
 
+    IConnectableLayer* AddBatchMatMulLayer(const BatchMatMulDescriptor& desc,
+                                           const char* name = nullptr);
+
     IConnectableLayer* AddBatchNormalizationLayer(const BatchNormalizationDescriptor& desc,
                                                   const ConstTensor& mean,
                                                   const ConstTensor& variance,
diff --git a/src/armnn/layers/BatchMatMulLayer.cpp b/src/armnn/layers/BatchMatMulLayer.cpp
new file mode 100644
index 0000000..501de2d
--- /dev/null
+++ b/src/armnn/layers/BatchMatMulLayer.cpp
@@ -0,0 +1,97 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "BatchMatMulLayer.hpp"
+
+#include <armnn/backends/WorkloadFactory.hpp>
+#include "layers/LayerCloneBase.hpp"
+
+namespace armnn
+{
+
+BatchMatMulLayer::BatchMatMulLayer(const BatchMatMulDescriptor& param, const char* name)
+    : LayerWithParameters(2, 1, LayerType::BatchMatMul, param, name)
+{}
+
+std::unique_ptr<IWorkload> BatchMatMulLayer::CreateWorkload(const IWorkloadFactory& factory) const
+{
+    BatchMatMulQueueDescriptor descriptor;
+    SetAdditionalInfo(descriptor);
+
+    return factory.CreateWorkload(LayerType::BatchMatMul, descriptor, PrepInfoAndDesc(descriptor));
+}
+
+BatchMatMulLayer* BatchMatMulLayer::Clone(Graph& graph) const
+{
+    auto layer = CloneBase<BatchMatMulLayer>(graph, m_Param, GetName());
+
+    return std::move(layer);
+}
+
+std::vector<TensorShape> BatchMatMulLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+{
+    ARMNN_ASSERT(inputShapes.size() == 2);
+
+    TensorShape inputXShape = inputShapes[0];
+    TensorShape inputYShape = inputShapes[1];
+
+    // Note: Take into account what pre-adjoint or pre-transposing will do to the inferred output shape
+
+    TensorShape& longerInput = inputXShape.GetNumDimensions() >= inputYShape.GetNumDimensions() ?
+                               inputXShape : inputYShape;
+    TensorShape& shorterInput = inputXShape.GetNumDimensions() >= inputYShape.GetNumDimensions() ?
+                                inputYShape : inputXShape;
+
+    unsigned int inputNumDimsOffset = longerInput.GetNumDimensions() - shorterInput.GetNumDimensions();
+
+    unsigned int outputNumDimensions = longerInput.GetNumDimensions();
+
+    std::vector<unsigned int> tensorDimensions(outputNumDimensions, 0);
+
+    auto axesToMul = BatchMatMulDescriptor::GetAxesToMul(m_Param, inputXShape, inputYShape);
+    const auto& longerAxesToMul = (axesToMul.first.first >= axesToMul.second.first &&
+                                   axesToMul.first.second >= axesToMul.second.second) ?
+                                  axesToMul.first : axesToMul.second;
+
+    for (unsigned int i = 0; i < outputNumDimensions; ++i)
+    {
+        if (i == longerAxesToMul.first)
+        {
+            tensorDimensions[i] = &shorterInput == &inputXShape ? inputXShape[i - inputNumDimsOffset] : inputXShape[i];
+        }
+        else if(i == longerAxesToMul.second)
+        {
+            tensorDimensions[i] = &shorterInput == &inputYShape ? inputYShape[i - inputNumDimsOffset] : inputYShape[i];
+        }
+        else // The other dimensions not to be multiplied (but may be broadcasted)
+        {
+            // Does NOT validate whether it's a valid broadcast - that's done in the validate func in WorkloadData.cpp
+            tensorDimensions[i] = static_cast<int>(i) - static_cast<int>(inputNumDimsOffset) < 0 ?
+                longerInput[i] :
+                std::max(longerInput[i], shorterInput[i - inputNumDimsOffset]);
+        }
+    }
+
+    auto outputShape = TensorShape(outputNumDimensions, tensorDimensions.data());
+    return std::vector<TensorShape>({ outputShape });
+}
+
+void BatchMatMulLayer::ValidateTensorShapesFromInputs()
+{
+    VerifyLayerConnections(2, CHECK_LOCATION());
+
+    const TensorShape& outputShape = GetOutputSlot(0).GetTensorInfo().GetShape();
+
+    VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
+
+    auto inferredShapes = InferOutputShapes({
+        GetInputSlot(0).GetConnection()->GetTensorInfo().GetShape(),
+        GetInputSlot(1).GetConnection()->GetTensorInfo().GetShape() });
+
+    ARMNN_ASSERT(inferredShapes.size() == 1);
+
+    ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchMatMulLayer");
+}
+
+} // namespace armnn
diff --git a/src/armnn/layers/BatchMatMulLayer.hpp b/src/armnn/layers/BatchMatMulLayer.hpp
new file mode 100644
index 0000000..8dc79d3
--- /dev/null
+++ b/src/armnn/layers/BatchMatMulLayer.hpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "LayerWithParameters.hpp"
+
+namespace armnn
+{
+
+class BatchMatMulLayer : public LayerWithParameters<BatchMatMulDescriptor>
+{
+public:
+    /// Makes a workload for the BatchMatMul type
+    /// using the given workload factory.
+    /// @param [in] factory The workload factory which will create the workload.
+    /// @return A pointer to the created workload, or nullptr if not created.
+    virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory &factory) const override;
+
+    /// Creates a dynamically-allocated copy of this layer.
+    /// @param [in] graph The graph into which this layer is being cloned.
+    BatchMatMulLayer* Clone(Graph &graph) const override;
+
+    /// Infers the output shape from the given input shapes.
+    /// @param [in] inputShapes The vector of input shapes for BatchMatMul.
+    /// @return A vector of inferred output shape.
+    std::vector<TensorShape> InferOutputShapes(const std::vector<TensorShape>& inputShapes) const override;
+
+    /// Check if the input tensor shapes
+    /// will lead to a valid configuration of @ref BatchMatMulLayer.
+    /// The layer's ShapeInferenceMethod determines whether the output shape is overwritten or only validated.
+    void ValidateTensorShapesFromInputs() override;
+
+protected:
+    /// Constructor to create a BatchMatMulLayer.
+    /// @param [in] param BatchMatMulDescriptor to configure optional parameters for batch matrix multiplication
+    /// @param [in] name Optional name for the layer
+    BatchMatMulLayer(const BatchMatMulDescriptor& param, const char* name);
+
+    /// Default destructor
+    ~BatchMatMulLayer() = default;
+};
+
+}