IVGCVSW-7603 Implement Reshape operators for Opaque Delegate

 * Moved CreateOutputTensorShape function to common DelegateUtils.hpp

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I3d8a9834ecd6b7cda170cce958677a0dde62824a
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index bea5566..003dffa 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -304,6 +304,7 @@
              test/PreluTestHelper.hpp
              test/ReduceTest.cpp
              test/ReduceTestHelper.hpp
+             test/ReshapeTest.cpp
              test/ResizeTest.cpp
              test/ResizeTestHelper.hpp
              test/RoundTest.cpp
diff --git a/delegate/classic/src/Redefine.hpp b/delegate/classic/src/Redefine.hpp
index 7aef74f..41c62c3 100644
--- a/delegate/classic/src/Redefine.hpp
+++ b/delegate/classic/src/Redefine.hpp
@@ -13,7 +13,6 @@
 #include <tensorflow/lite/c/builtin_op_data.h>
 #include <tensorflow/lite/c/common.h>
 #include <tensorflow/lite/minimal_logging.h>
-#include <numeric>
 
 namespace armnnDelegate
 {
@@ -84,36 +83,6 @@
     return Connect(layer, tfLiteNode, delegateData);
 }
 
-
-TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
-                                     const std::vector<int32_t>& targetShape,
-                                     armnn::ReshapeDescriptor& reshapeDesc)
-{
-    std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
-    const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);
-
-    if (stretchDim != targetShape.end())
-    {
-        if (std::find(std::next(stretchDim), targetShape.end(), -1) != targetShape.end())
-        {
-            // Return kTfLiteError and log the error after returning
-            return kTfLiteError;
-        }
-
-        auto targetNumElements =
-            armnn::numeric_cast<unsigned int>(
-                std::accumulate(targetShape.begin(), targetShape.end(), -1, std::multiplies<int32_t>()));
-
-        auto stretchIndex = static_cast<size_t>(std::distance(targetShape.begin(), stretchDim));
-        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
-    }
-
-    armnn::TensorShape outputShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()),
-                                                        outputDims.data());
-    reshapeDesc.m_TargetShape = outputShape;
-    return kTfLiteOk;
-}
-
 TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
                                   TfLiteContext* tfLiteContext,
                                   TfLiteNode* tfLiteNode,
diff --git a/delegate/common/src/DelegateUtils.hpp b/delegate/common/src/DelegateUtils.hpp
index 51c70f9..37fe9b5 100644
--- a/delegate/common/src/DelegateUtils.hpp
+++ b/delegate/common/src/DelegateUtils.hpp
@@ -21,6 +21,8 @@
 #include <tensorflow/lite/minimal_logging.h>
 #include <tensorflow/lite/kernels/kernel_util.h>
 
+#include <numeric>
+
 namespace
 {
 
@@ -138,4 +140,45 @@
     }
 
 
+/// Creates the armnn::TensorShape for the output of a Reshape operation and
+/// stores it in reshapeDesc. A single -1 ("stretch") entry in targetShape is
+/// resolved from the element count of inputTensorInfo.
+/// Returns kTfLiteError if targetShape contains more than one -1, or if the
+/// stretch dimension cannot be computed; the caller logs the failure.
+TfLiteStatus CreateOutputTensorShape(const armnn::TensorInfo& inputTensorInfo,
+                                     const std::vector<int32_t>& targetShape,
+                                     armnn::ReshapeDescriptor& reshapeDesc)
+{
+    std::vector<unsigned int> outputDims(targetShape.begin(), targetShape.end());
+    const auto stretchDim = std::find(targetShape.begin(), targetShape.end(), -1);
+
+    if (stretchDim != targetShape.end())
+    {
+        if (std::find(std::next(stretchDim), targetShape.end(), -1) != targetShape.end())
+        {
+            // Return kTfLiteError and log the error after returning
+            return kTfLiteError;
+        }
+
+        auto targetNumElements =
+                armnn::numeric_cast<unsigned int>(
+                        std::accumulate(targetShape.begin(), targetShape.end(), -1, std::multiplies<int32_t>()));
+
+        // Guard against a zero-sized dimension alongside -1 (e.g. {0, -1}),
+        // which would otherwise cause a division by zero below.
+        if (targetNumElements == 0)
+        {
+            return kTfLiteError;
+        }
+
+        auto stretchIndex = static_cast<size_t>(std::distance(targetShape.begin(), stretchDim));
+        outputDims[stretchIndex] = inputTensorInfo.GetNumElements() / targetNumElements;
+    }
+
+    armnn::TensorShape outputShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()),
+                                                        outputDims.data());
+    reshapeDesc.m_TargetShape = outputShape;
+    return kTfLiteOk;
+}
+
 } // namespace anonymous
diff --git a/delegate/opaque/src/Redefine.hpp b/delegate/opaque/src/Redefine.hpp
index 7dd8561..dc424cf 100644
--- a/delegate/opaque/src/Redefine.hpp
+++ b/delegate/opaque/src/Redefine.hpp
@@ -4,15 +4,7 @@
 //
 #pragma once
 
-#include <armnn/utility/IgnoreUnused.hpp>
-
-#include "OpaqueDelegateUtils.hpp"
-
-#include <tensorflow/lite/builtin_ops.h>
-#include <tensorflow/lite/c/builtin_op_data.h>
-#include <tensorflow/lite/c/common.h>
-#include <tensorflow/lite/minimal_logging.h>
-#include <numeric>
+#include <OpaqueDelegateUtils.hpp>
 
 namespace armnnOpaqueDelegate
 {
@@ -62,13 +54,13 @@
     armnn::BackendId setBackend;
     auto             validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported) {
         FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("CAST",
-                                   tfLiteContext,
-                                   IsCastSupported,
-                                   delegateData.m_Backends,
-                                   isSupported,
-                                   setBackend,
-                                   inputTensorInfo,
-                                   outInfo);
+                                          tfLiteContext,
+                                          IsCastSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outInfo);
     };
 
     // If the m_Network is a nullptr, this signals that a prerequisite TfLite callback is required to clarify the
@@ -97,4 +89,177 @@
     // Connect
     return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
 }
+
+/// Converts a TfLite Reshape operator into an ArmNN Reshape layer, resolving
+/// the target shape from either the builtin options or a second input tensor
+/// and validating it against the input tensor's element count.
+TfLiteStatus VisitReshapeOperator(DelegateData& delegateData,
+                                  TfLiteOpaqueContext* tfLiteContext,
+                                  TfLiteOpaqueNode* tfLiteNode,
+                                  int nodeIndex,
+                                  int32_t operatorCode)
+{
+    auto numInputs = TfLiteOpaqueNodeNumberOfInputs(tfLiteNode);
+
+    if (numInputs == 2)
+    {
+        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    }
+    else
+    {
+        TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    }
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensor.
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensors.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo0 = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    const armnn::TensorInfo& outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor, true);
+
+    armnn::ReshapeDescriptor reshapeDesc;
+    std::vector<int32_t> targetShape;
+
+    auto* reshapeOptions = reinterpret_cast<TfLiteReshapeParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+
+    // The new shape can be defined by either a second input tensor or by a builtin option, we need to check for both.
+    // Options might be set without valid data; we need to check that the dimensions are in a valid range.
+    if (reshapeOptions && reshapeOptions->num_dimensions > 0 && reshapeOptions->num_dimensions <= 8)
+    {
+        for (int i = 0; i < reshapeOptions->num_dimensions; ++i)
+        {
+            targetShape.push_back(reshapeOptions->shape[i]);
+        }
+    }
+    else if (numInputs == 2)
+    {
+        // Get shape from the second input tensor
+        const TfLiteOpaqueTensor* tfLiteShapeInputTensor =
+                TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+        if (!IsValid(tfLiteContext, tfLiteShapeInputTensor, operatorCode, nodeIndex))
+        {
+            return kTfLiteError;
+        }
+
+        int32_t numDims = TfLiteOpaqueTensorNumDims(tfLiteShapeInputTensor);
+        if (numDims != 1)
+        {
+            TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                    tfLiteContext,
+                    "TfLiteArmnnOpaqueDelegate: Target 'shape' input is not a 1D tensor in "
+                    "operator #%d node #%d: Falling back to TfLiteOptions.",
+                    operatorCode, nodeIndex);
+        }
+        else
+        {
+            // Get the shape data out of the input tensor
+            auto* shapeTensorDataPtr = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteShapeInputTensor));
+            int32_t shapeTensorNumValues = TfLiteOpaqueTensorDim(tfLiteShapeInputTensor, 0);
+            for (int32_t i = 0; i < shapeTensorNumValues; ++i)
+            {
+                targetShape.push_back(shapeTensorDataPtr[i]);
+            }
+        }
+    }
+    else
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Target shape not defined in reshape parameters or input tensor. "
+                "At least one method required in operator #%d node #%d: ",
+                operatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    // Use the data to create the required tensor shape.
+    if (CreateOutputTensorShape(inputTensorInfo0, targetShape, reshapeDesc) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: At most one component of shape can be -1 in: "
+                "operator #%d node #%d: ",
+                operatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    if (reshapeDesc.m_TargetShape.GetNumElements() != inputTensorInfo0.GetNumElements())
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Reshape, number of elements in output shape does not match input "
+                "operator #%d node #%d: ",
+                operatorCode, nodeIndex);
+        return kTfLiteError;
+    }
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("RESHAPE",
+                                          tfLiteContext,
+                                          IsReshapeSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo0,
+                                          outInfo,
+                                          reshapeDesc);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // try to connect the Constant Inputs if there are any
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
 }
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index c96f75d..2fd8142 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -1002,6 +1002,12 @@
                                            tfLiteNode,
                                            nodeIndex,
                                            kTfLiteBuiltinRelu6);
+        case kTfLiteBuiltinReshape:
+            return VisitReshapeOperator(delegateData,
+                                        tfLiteContext,
+                                        tfLiteNode,
+                                        nodeIndex,
+                                        kTfLiteBuiltinReshape);
         case kTfLiteBuiltinResizeNearestNeighbor:
             return VisitResizeOperator(delegateData,
                                        tfLiteContext,