MLCE-753 Expand TensorShape for relevant layers before verifying support

   Previously we were adding a reshape layer to "broadcast" tensors
   for elementwise operations. This broadcast was happening too late
   and was effectively just an expand-dims operation. This was breaking the constant
   attributes of tensors and layer support of certain backends.

 * Remove addition of reshape layer when expanding dimensions
 * Replace broadcast function with expand dims to equal rank function
 * Fix some error status checks in various layers
 * Add new TensorUtil function that expands dims to a defined rank
 * Add unit tests to new TensorUtil function

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Change-Id: I31aca47c98075fef4f86864a15470f5faa55ab8d
diff --git a/delegate/src/Comparison.hpp b/delegate/src/Comparison.hpp
index 80354e8..688f90c 100644
--- a/delegate/src/Comparison.hpp
+++ b/delegate/src/Comparison.hpp
@@ -57,10 +57,17 @@
         return kTfLiteError;
     }
 
-    const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
-    const armnn::TensorInfo& inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
+    armnn::TensorInfo inputTensorInfo0 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor0);
+    armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
+    // Check if we need to expand the dims of any of the input tensor infos.
+    // This is required for a few of the backends.
+    if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+    {
+        ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+    }
+
     armnn::ComparisonOperation comparisonOperation = armnn::ComparisonOperation::Equal;
     switch(tfLiteComparisonOperatorCode)
     {
@@ -122,17 +129,7 @@
         return kTfLiteError;
     }
 
-    auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
-                                        inputTensorInfo1,
-                                        comparisonLayer,
-                                        tfLiteContext,
-                                        tfLiteNode,
-                                        delegateData);
-    if (!reshapeLayer)
-    {
-        return kTfLiteError;
-    }
-    return kTfLiteOk;
+    return Connect(comparisonLayer, tfLiteNode, delegateData);
 }
 
 } // namespace armnnDelegate
diff --git a/delegate/src/Control.hpp b/delegate/src/Control.hpp
index 17f23d8..a3ea6e9 100644
--- a/delegate/src/Control.hpp
+++ b/delegate/src/Control.hpp
@@ -172,7 +172,10 @@
 
     armnn::IOutputSlot& outputSlot = concatenationLayer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
-    Connect(concatenationLayer, tfLiteNode, delegateData);
+    if(Connect(concatenationLayer, tfLiteNode, delegateData) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
 
     if (activationType == kTfLiteActNone)
     {
diff --git a/delegate/src/Convolution.hpp b/delegate/src/Convolution.hpp
index a8559e2..31cb2ab 100644
--- a/delegate/src/Convolution.hpp
+++ b/delegate/src/Convolution.hpp
@@ -222,7 +222,10 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
-    Connect(layer, tfLiteNode, delegateData);
+    if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
 
     if (!tfLiteNodeParameters)
     {
@@ -408,7 +411,10 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
-    Connect(layer, tfLiteNode, delegateData);
+    if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
 
     if (!tfLiteNodeParameters)
     {
@@ -624,7 +630,11 @@
     armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
     outputSlot.SetTensorInfo(outputTensorInfo);
 
-    Connect(layer, tfLiteNode, delegateData);
+    if(Connect(layer, tfLiteNode, delegateData) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
     if (!tfLiteNodeParameters)
     {
         // No Activation
diff --git a/delegate/src/DelegateUtils.hpp b/delegate/src/DelegateUtils.hpp
index 3e74225..1aa9029 100644
--- a/delegate/src/DelegateUtils.hpp
+++ b/delegate/src/DelegateUtils.hpp
@@ -13,6 +13,7 @@
 #include <armnn/utility/NumericCast.hpp>
 
 #include <armnnUtils/Permute.hpp>
+#include <armnnUtils/TensorUtils.hpp>
 
 #include <tensorflow/lite/builtin_ops.h>
 #include <tensorflow/lite/c/builtin_op_data.h>
@@ -188,91 +189,25 @@
     return kTfLiteOk;
 }
 
-armnn::IConnectableLayer* BroadcastTensor(const armnn::TensorInfo& inputInfo0,
-                                          const armnn::TensorInfo& inputInfo1,
-                                          armnn::IConnectableLayer* startLayer,
-                                          TfLiteContext* tfLiteContext,
-                                          TfLiteNode* tfLiteNode,
-                                          armnnDelegate::DelegateData& delegateData)
+void ExpandTensorRankToEqual(armnn::TensorInfo& inputInfo0,
+                             armnn::TensorInfo& inputInfo1)
 {
     unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
     unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
 
     if (inputDimensions0 == inputDimensions1)
     {
-        auto status = Connect(startLayer, tfLiteNode, delegateData);
-        return status == kTfLiteOk ? startLayer : nullptr;
+        return;
     }
 
     unsigned int biggerInputDimensions = std::max(inputDimensions0, inputDimensions1);
-    unsigned int dimDifference = static_cast<unsigned int>(std::abs(armnn::numeric_cast<int>(inputDimensions0) -
-                                                                    armnn::numeric_cast<int>(inputDimensions1)));
 
     bool input0IsSmaller = inputDimensions0 < inputDimensions1;
-    const armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
-    const armnn::TensorShape& smallShape = smallInfo.GetShape();
+    armnn::TensorInfo& smallInfo = input0IsSmaller ? inputInfo0 : inputInfo1;
+    const armnn::TensorShape& newShape = armnnUtils::ExpandDimsToRank(smallInfo.GetShape(), biggerInputDimensions);
 
-    std::vector<unsigned int> reshapedDimensions(biggerInputDimensions, 1);
-    for (unsigned int i = dimDifference; i < biggerInputDimensions; ++i)
-    {
-        reshapedDimensions[i] = smallShape[i - dimDifference];
-    }
+    smallInfo.SetShape(newShape);
 
-    armnn::TensorInfo reshapedInfo = smallInfo;
-    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
-                                              reshapedDimensions.data() });
-
-    armnn::ReshapeDescriptor reshapeDescriptor;
-    reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
-    bool isSupported = false;
-    armnn::BackendId setBackend;
-    FORWARD_LAYER_SUPPORT_FUNC("RESHAPE",
-                               tfLiteContext,
-                               IsReshapeSupported,
-                               delegateData.m_Backends,
-                               isSupported,
-                               setBackend,
-                               smallInfo,
-                               reshapedInfo,
-                               reshapeDescriptor);
-    if (!isSupported)
-    {
-        return nullptr;
-    }
-
-    ARMNN_ASSERT(delegateData.m_Network != nullptr);
-    // Add Reshape layer
-    armnn::IConnectableLayer* reshapeLayer = delegateData.m_Network->AddReshapeLayer(reshapeDescriptor);
-    reshapeLayer->SetBackendId(setBackend);
-    ARMNN_ASSERT(reshapeLayer != nullptr);
-    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
-
-    if (input0IsSmaller)
-    {
-        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
-            ->Connect(reshapeLayer->GetInputSlot(0));
-        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
-        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
-            ->Connect(startLayer->GetInputSlot(1));
-    }
-    else
-    {
-        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[1])]
-            ->Connect(reshapeLayer->GetInputSlot(0));
-        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
-        delegateData.m_OutputSlotForNode[static_cast<unsigned long>(tfLiteNode->inputs->data[0])]
-            ->Connect(startLayer->GetInputSlot(0));
-    }
-
-    // Prepare output slots
-    for (unsigned int outputIndex = 0; outputIndex < startLayer->GetNumOutputSlots(); ++outputIndex)
-    {
-        armnn::IOutputSlot& outputSlot = startLayer->GetOutputSlot(outputIndex);
-        delegateData.m_OutputSlotForNode
-            [static_cast<unsigned long>(tfLiteNode->outputs->data[outputIndex])] = &outputSlot;
-    }
-
-    return reshapeLayer;
 }
 
 TfLiteStatus FusedActivation(TfLiteContext* tfLiteContext,
diff --git a/delegate/src/ElementwiseBinary.hpp b/delegate/src/ElementwiseBinary.hpp
index 8096acf..52c6b24 100644
--- a/delegate/src/ElementwiseBinary.hpp
+++ b/delegate/src/ElementwiseBinary.hpp
@@ -254,6 +254,13 @@
 
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
+    // Check if we need to expand the dims of the input tensor infos.
+    // This is required for a few of the backends.
+    if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+    {
+        ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+    }
+
     auto* tfLiteNodeParameters = reinterpret_cast<TfLiteAddParams*>(tfLiteNode->builtin_data);
     TfLiteFusedActivation activationType = kTfLiteActNone;
     if (tfLiteNodeParameters)
@@ -363,13 +370,7 @@
         return inputsTensorsProcess;
     }
 
-    auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
-                                        inputTensorInfo1,
-                                        elementwiseBinaryLayer,
-                                        tfLiteContext,
-                                        tfLiteNode,
-                                        delegateData);
-    if (!reshapeLayer)
+    if(Connect(elementwiseBinaryLayer, tfLiteNode, delegateData) != kTfLiteOk)
     {
         return kTfLiteError;
     }
diff --git a/delegate/src/Gather.hpp b/delegate/src/Gather.hpp
index 9e98966..9125997 100644
--- a/delegate/src/Gather.hpp
+++ b/delegate/src/Gather.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -101,8 +101,6 @@
         return inputsTensorsProcess;
     }
 
-    Connect(layer, tfLiteNode, delegateData);
-
-    return kTfLiteOk;
+    return Connect(layer, tfLiteNode, delegateData);
 }
 } // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/GatherNd.hpp b/delegate/src/GatherNd.hpp
index f2192f7..cf526e1 100644
--- a/delegate/src/GatherNd.hpp
+++ b/delegate/src/GatherNd.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -77,8 +77,6 @@
         return inputsTensorsProcess;
     }
 
-    Connect(layer, tfLiteNode, delegateData);
-
-    return kTfLiteOk;
+    return Connect(layer, tfLiteNode, delegateData);
 }
 } // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/src/LogicalBinary.hpp b/delegate/src/LogicalBinary.hpp
index b6a8f5d..d71618e 100644
--- a/delegate/src/LogicalBinary.hpp
+++ b/delegate/src/LogicalBinary.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -46,6 +46,13 @@
     armnn::TensorInfo inputTensorInfo1 = GetTensorInfoForTfLiteTensor(tfLiteInputTensor1);
     const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor, true);
 
+    // Check if we need to expand the dims of any of the input tensor infos.
+    // This is required for a few of the backends.
+    if(inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
+    {
+        ExpandTensorRankToEqual(inputTensorInfo0, inputTensorInfo1);
+    }
+
     // Setup descriptor and assign operation
     armnn::LogicalBinaryDescriptor desc;
     desc.m_Operation = binaryOperation;
@@ -89,18 +96,7 @@
         return inputsTensorsProcess;
     }
 
-    // LogicalBinary operators support broadcasting
-    auto reshapeLayer = BroadcastTensor(inputTensorInfo0,
-                                        inputTensorInfo1,
-                                        logicalBinaryLayer,
-                                        tfLiteContext,
-                                        tfLiteNode,
-                                        delegateData);
-    if (!reshapeLayer)
-    {
-        return kTfLiteError;
-    }
-    return kTfLiteOk;
+    return Connect(logicalBinaryLayer, tfLiteNode, delegateData);
 }
 
 } // namespace armnnDelegate
diff --git a/delegate/src/test/ElementwiseBinaryTest.cpp b/delegate/src/test/ElementwiseBinaryTest.cpp
index 9d03204..8099efe 100644
--- a/delegate/src/test/ElementwiseBinaryTest.cpp
+++ b/delegate/src/test/ElementwiseBinaryTest.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -885,7 +885,7 @@
     AddBroadcastTest(backends);
 }
 
-TEST_CASE ("ADD_Actiation_CpuAcc_Test")
+TEST_CASE ("ADD_Activation_CpuAcc_Test")
 {
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     AddActivationTest(backends);
@@ -1017,7 +1017,7 @@
     AddConstInputTest(backends);
 }
 
-TEST_CASE ("ADD_Actiation_CpuRef_Test")
+TEST_CASE ("ADD_Activation_CpuRef_Test")
 {
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     AddActivationTest(backends);
diff --git a/include/armnnUtils/TensorUtils.hpp b/include/armnnUtils/TensorUtils.hpp
index a2aa9b0..7bf41c1 100644
--- a/include/armnnUtils/TensorUtils.hpp
+++ b/include/armnnUtils/TensorUtils.hpp
@@ -48,6 +48,8 @@
 
 armnn::TensorShape ExpandDims(const armnn::TensorShape& tensorShape, int axis);
 
+armnn::TensorShape ExpandDimsToRank(const armnn::TensorShape& tensorShape, unsigned int rank);
+
 std::vector<unsigned int> SqueezeDims(const armnn::TensorShape& tensorShape);
 
 unsigned int GetNumElementsBetween(const armnn::TensorShape& shape,
diff --git a/src/armnnUtils/TensorUtils.cpp b/src/armnnUtils/TensorUtils.cpp
index 03109e0..cb73d92 100644
--- a/src/armnnUtils/TensorUtils.cpp
+++ b/src/armnnUtils/TensorUtils.cpp
@@ -165,6 +165,31 @@
     return { outputDim, outputShape.data() };
 }
 
+TensorShape ExpandDimsToRank(const TensorShape& tensorShape, unsigned int rank)
+{
+    // Can't expand if rank is smaller than current shape
+    if (tensorShape.GetNumDimensions() >= rank)
+    {
+        return tensorShape;
+    }
+
+    std::vector<unsigned int> newShape;
+
+    // First, prepend 1s to the shape until it reaches the target rank
+    for (unsigned int i = 0; i < rank - tensorShape.GetNumDimensions(); ++i)
+    {
+        newShape.push_back(1);
+    }
+
+    // Then iterate through the original shape and append it to the new shape with the added 1s
+    for (unsigned int i = 0; i < tensorShape.GetNumDimensions(); ++i)
+    {
+        newShape.push_back(tensorShape[i]);
+    }
+
+    return TensorShape(static_cast<unsigned int>(newShape.size()), newShape.data());
+}
+
 std::vector<unsigned int> SqueezeDims(const TensorShape& tensorShape)
 {
     std::vector<unsigned int> squeezedDims;
diff --git a/src/armnnUtils/test/TensorUtilsTest.cpp b/src/armnnUtils/test/TensorUtilsTest.cpp
index a69a009..ed21bbe 100644
--- a/src/armnnUtils/test/TensorUtilsTest.cpp
+++ b/src/armnnUtils/test/TensorUtilsTest.cpp
@@ -126,11 +126,79 @@
     CHECK_THROWS_AS(ExpandDims(inputShape, 4), armnn::InvalidArgumentException);
 }
 
+TEST_CASE("ExpandDimsInvalidNegativeAxisTest")
+{
+    armnn::TensorShape inputShape({ 2, 3, 4 });
+
+    // Invalid expand dimension -5
+    CHECK_THROWS_AS(ExpandDims(inputShape, -5), armnn::InvalidArgumentException);
+}
+
+TEST_CASE("ExpandDimsBy1Rank")
+{
+    armnn::TensorShape inputShape({ 2, 3, 4 });
+
+    // Expand by 1 dimension
+    armnn::TensorShape outputShape = ExpandDimsToRank(inputShape, 4);
+    CHECK(outputShape.GetNumDimensions() == 4);
+    CHECK(outputShape[0] == 1);
+    CHECK(outputShape[1] == 2);
+    CHECK(outputShape[2] == 3);
+    CHECK(outputShape[3] == 4);
+}
+
+TEST_CASE("ExpandDimsBy2Ranks")
+{
+    armnn::TensorShape inputShape({ 3, 4 });
+
+    // Expand 2 dimensions
+    armnn::TensorShape outputShape = ExpandDimsToRank(inputShape, 4);
+    CHECK(outputShape.GetNumDimensions() == 4);
+    CHECK(outputShape[0] == 1);
+    CHECK(outputShape[1] == 1);
+    CHECK(outputShape[2] == 3);
+    CHECK(outputShape[3] == 4);
+}
+
+TEST_CASE("ExpandDimsBy3Ranks")
+{
+    armnn::TensorShape inputShape({ 4 });
+
+    // Expand 3 dimensions
+    armnn::TensorShape outputShape = ExpandDimsToRank(inputShape, 4);
+    CHECK(outputShape.GetNumDimensions() == 4);
+    CHECK(outputShape[0] == 1);
+    CHECK(outputShape[1] == 1);
+    CHECK(outputShape[2] == 1);
+    CHECK(outputShape[3] == 4);
+}
+
+TEST_CASE("ExpandDimsInvalidRankAmount")
+{
+    armnn::TensorShape inputShape({ 2, 3, 4 });
+
+    // Don't expand because target rank is smaller than current rank
+    armnn::TensorShape outputShape = ExpandDimsToRank(inputShape, 2);
+    CHECK(outputShape.GetNumDimensions() == 3);
+    CHECK(outputShape[0] == 2);
+    CHECK(outputShape[1] == 3);
+    CHECK(outputShape[2] == 4);
+}
+
+TEST_CASE("ExpandDimsToRankInvalidTensorShape")
+{
+    armnn::TensorShape inputShape({ 2, 3, 4 });
+
+    // Throw exception because rank 6 tensors are unsupported by armnn
+    CHECK_THROWS_AS(ExpandDimsToRank(inputShape, 6), armnn::InvalidArgumentException);
+}
+
+
 TEST_CASE("ReduceDimsShapeAll1s")
 {
     armnn::TensorShape inputShape({ 1, 1, 1 });
 
-    // Invalid expand dimension 4
+    // Reduce dimension 2
     armnn::TensorShape outputShape = ReduceDims(inputShape, 2);
     CHECK(outputShape.GetNumDimensions() == 2);
     CHECK(outputShape[0] == 1);
@@ -141,7 +209,7 @@
 {
     armnn::TensorShape inputShape({ 1, 2, 1 });
 
-    // Invalid expand dimension 4
+    // Reduce dimension 1
     armnn::TensorShape outputShape = ReduceDims(inputShape, 1);
     CHECK(outputShape.GetNumDimensions() == 2);
     CHECK(outputShape[0] == 2);
@@ -152,7 +220,7 @@
 {
     armnn::TensorInfo inputInfo({ 1, 1, 1 }, DataType::Float32);
 
-    // Invalid expand dimension 4
+    // Reduce dimension 2
     armnn::TensorInfo outputInfo = ReduceDims(inputInfo, 2);
     CHECK(outputInfo.GetShape().GetNumDimensions() == 2);
     CHECK(outputInfo.GetShape()[0] == 1);
@@ -163,7 +231,7 @@
 {
     armnn::TensorInfo inputInfo({ 1, 2, 1 }, DataType::Float32);
 
-    // Invalid expand dimension 4
+    // Reduce dimension 1
     armnn::TensorInfo outputInfo = ReduceDims(inputInfo, 1);
     CHECK(outputInfo.GetNumDimensions() == 2);
     CHECK(outputInfo.GetShape()[0] == 2);
@@ -174,7 +242,7 @@
 {
     armnn::TensorShape inputShape({ 1, 1, 1 });
 
-    // Invalid expand dimension 4
+    // Do not reduce because dimension does not exist
     armnn::TensorShape outputShape = ReduceDims(inputShape, 4);
     CHECK(outputShape.GetNumDimensions() == 3);
     CHECK(outputShape[0] == 1);
@@ -182,13 +250,6 @@
     CHECK(outputShape[2] == 1);
 }
 
-TEST_CASE("ExpandDimsInvalidNegativeAxisTest")
-{
-    armnn::TensorShape inputShape({ 2, 3, 4 });
-
-    // Invalid expand dimension -5
-    CHECK_THROWS_AS(ExpandDims(inputShape, -5), armnn::InvalidArgumentException);
-}
 
 TEST_CASE("ToFloatArrayInvalidDataType")
 {