IVGCVSW-7605 IVGCVSW-7604 Implement Squeeze and ExpandDims operators for Classic and Opaque Delegate

 * Implemented the previously unsupported Squeeze and ExpandDims operators in both the Classic and Opaque Delegates.
 * Added unit tests.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ib39eeea53c114b15943e8dc2e796ce64c40cb3a5
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 003dffa..ef04913 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -150,6 +150,7 @@
         test/ElementwiseBinaryTestHelper.hpp
         test/ElementwiseUnaryTest.cpp
         test/ElementwiseUnaryTestHelper.hpp
+        test/ExpandDimsTest.cpp
         test/FillTest.cpp
         test/FillTestHelper.hpp
         test/FullyConnectedTest.cpp
@@ -193,6 +194,7 @@
         test/ShapeTestHelper.hpp
         test/SliceTest.cpp
         test/SliceTestHelper.hpp
+        test/SqueezeTest.cpp
         test/StridedSliceTest.cpp
         test/StridedSliceTestHelper.hpp
         test/SplitTest.cpp
@@ -280,6 +282,7 @@
              test/DepthwiseConvolution2dTest.cpp
              test/ElementwiseUnaryTestHelper.hpp
              test/ElementwiseUnaryTest.cpp
+             test/ExpandDimsTest.cpp
              test/FullyConnectedTest.cpp
              test/FullyConnectedTestHelper.hpp
              test/GatherTest.cpp
@@ -319,6 +322,7 @@
              test/SoftmaxTestHelper.hpp
              test/SpaceDepthTest.cpp
              test/SpaceDepthTestHelper.hpp
+             test/SqueezeTest.cpp
              test/StridedSliceTest.cpp
              test/StridedSliceTestHelper.hpp
              test/TestUtils.hpp
diff --git a/delegate/classic/src/Redefine.hpp b/delegate/classic/src/Redefine.hpp
index 41c62c3..2c29083 100644
--- a/delegate/classic/src/Redefine.hpp
+++ b/delegate/classic/src/Redefine.hpp
@@ -5,8 +5,6 @@
 
 #pragma once
 
-#include <armnn/utility/IgnoreUnused.hpp>
-
 #include <ClassicDelegateUtils.hpp>
 
 #include <tensorflow/lite/builtin_ops.h>
@@ -231,13 +229,83 @@
                                   int nodeIndex,
                                   int32_t operatorCode)
 {
-    armnn::IgnoreUnused(delegateData,
-                        tfLiteContext,
-                        tfLiteNode,
-                        nodeIndex,
-                        operatorCode);
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
 
-    return kTfLiteError;
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    auto* options = reinterpret_cast<TfLiteSqueezeParams*>(tfLiteNode->builtin_data);
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+
+    std::vector<uint32_t> squeezeDim;
+    // A single negative dim index is interpreted as in Python:
+    // the resolved index is the number of input dimensions plus the negative value.
+    if (options->num_squeeze_dims == 1 && options->squeeze_dims[0] < 0)
+    {
+        int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
+        squeezeDim.push_back(static_cast<uint32_t>(dim));
+    }
+    else
+    {
+        for (int32_t i = 0; i < options->num_squeeze_dims; ++i)
+        {
+            squeezeDim.push_back(static_cast<uint32_t>(options->squeeze_dims[i]));
+        }
+    }
+
+    armnn::TensorInfo outputTensorInfo = OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);
+
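+    // Squeeze is mapped to an Arm NN Reshape layer whose target shape is the squeezed shape computed above.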
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC("SQUEEZE",
+                                   tfLiteContext,
+                                   IsReshapeSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   setBackend,
+                                   inputTensorInfo,
+                                   outInfo,
+                                   reshapeDesc);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect any constant inputs.
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteNode, delegateData);
 }
 
 TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
@@ -246,13 +314,104 @@
                                      int nodeIndex,
                                      int32_t operatorCode)
 {
-    armnn::IgnoreUnused(delegateData,
-                        tfLiteContext,
-                        tfLiteNode,
-                        nodeIndex,
-                        operatorCode);
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
 
-    return kTfLiteError;
+    const TfLiteTensor* tfLiteTensors = tfLiteContext->tensors;
+    const TfLiteTensor& tfLiteInputTensor = tfLiteTensors[tfLiteNode->inputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteAxisTensor = tfLiteTensors[tfLiteNode->inputs->data[1]];
+    if (!IsValid(tfLiteContext, tfLiteAxisTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteTensor& tfLiteOutputTensor = tfLiteTensors[tfLiteNode->outputs->data[0]];
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteInputTensor);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteTensor(tfLiteOutputTensor);
+
+    auto* axisTensorData = tflite::GetTensorData<int32_t>(&tfLiteAxisTensor);
+    int32_t axis = axisTensorData[0];
+
+    int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
+    if (axis > inputDimSize || axis < 0 - (inputDimSize + 1))
+    {
+        TF_LITE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Axis must be in range "
+                "[0 - (inputDimSize + 1), inputDimSize] inclusive.");
+        return kTfLiteError;
+    }
+
+    if (axis < 0)
+    {
+        axis = inputDimSize + axis + 1;
+    }
+
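+    // Build the expanded shape by copying the input dimensions and inserting a 1 at the resolved axis.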
+    std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1);
+    unsigned int inputShapeIndex = 0;
+    for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
+    {
+        if (i == static_cast<unsigned int>(axis))
+        {
+            shape[i] = 1;
+        }
+        else
+        {
+            shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
+            ++inputShapeIndex;
+        }
+    }
+
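+    // ExpandDims is likewise mapped to an Arm NN Reshape layer targeting the expanded shape.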
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = armnn::TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape.data());
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC("EXPAND_DIMS",
+                                   tfLiteContext,
+                                   IsReshapeSupported,
+                                   delegateData.m_Backends,
+                                   isSupported,
+                                   setBackend,
+                                   inputTensorInfo,
+                                   outInfo,
+                                   reshapeDesc);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect any constant inputs.
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteNode, delegateData);
 }
 
 } // namespace armnnDelegate
diff --git a/delegate/common/src/DelegateUtils.hpp b/delegate/common/src/DelegateUtils.hpp
index 37fe9b5..1671a4c 100644
--- a/delegate/common/src/DelegateUtils.hpp
+++ b/delegate/common/src/DelegateUtils.hpp
@@ -169,4 +169,56 @@
     return kTfLiteOk;
 }
 
+armnn::TensorInfo OutputShapeOfSqueeze(std::vector<uint32_t> squeezeDims,
+                                       const armnn::TensorInfo& inputTensorInfo)
+{
+    static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
+
+    if (inputTensorInfo.GetNumDimensions() > 4)
+    {
+        std::stringstream ss;
+        ss << "Input tensor has unexpected number of dimensions:"
+           << inputTensorInfo.GetNumDimensions()
+           << " shape:" << inputTensorInfo.GetShape()
+           << " "
+           << CHECK_LOCATION().AsString();
+        throw armnn::ParseException(ss.str());
+    }
+
+    if (squeezeDims.empty())
+    {
+        squeezeDims.assign(dimensionSequence, dimensionSequence + inputTensorInfo.GetNumDimensions());
+    }
+
+    std::vector<uint32_t> outputDims;
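+    // A dimension is removed only if it is listed in squeezeDims and has size 1; all other dimensions are kept.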
+    for (unsigned int i = 0; i < inputTensorInfo.GetNumDimensions(); i++)
+    {
+        bool skipSqueeze = (std::find(squeezeDims.begin(), squeezeDims.end(), i) == squeezeDims.end());
+        auto currentDimension = inputTensorInfo.GetShape()[i];
+        if (skipSqueeze || currentDimension != 1)
+        {
+            outputDims.push_back(currentDimension);
+        }
+    }
+
+    if (outputDims.size() > 4)
+    {
+        std::stringstream ss;
+        ss << "Output tensor has unexpected number of dimensions:"
+           << outputDims.size()
+           << " input shape:" << inputTensorInfo.GetShape()
+           << " "
+           << CHECK_LOCATION().AsString();
+        throw armnn::ParseException(ss.str());
+    }
+
+    armnn::TensorShape outShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
+
+    // We need to preserve the tensor type and the quantization data as well
+    armnn::TensorInfo outTensorInfo = inputTensorInfo;
+    outTensorInfo.SetShape(outShape);
+
+    return outTensorInfo;
+}
+
 } // namespace anonymous
diff --git a/delegate/opaque/src/Redefine.hpp b/delegate/opaque/src/Redefine.hpp
index dc424cf..ce90af0 100644
--- a/delegate/opaque/src/Redefine.hpp
+++ b/delegate/opaque/src/Redefine.hpp
@@ -259,4 +259,241 @@
     return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
 }
 
+TfLiteStatus VisitSqueezeOperator(DelegateData& delegateData,
+                                  TfLiteOpaqueContext* tfLiteContext,
+                                  TfLiteOpaqueNode* tfLiteNode,
+                                  int nodeIndex,
+                                  int32_t operatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensor.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensors.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    auto* options = reinterpret_cast<TfLiteSqueezeParams*>(TfLiteOpaqueNodeGetBuiltinData(tfLiteNode));
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+
+    std::vector<uint32_t> squeezeDim;
+    // A single negative dim index is interpreted as in Python:
+    // the resolved index is the number of input dimensions plus the negative value.
+    if (options->num_squeeze_dims == 1 && options->squeeze_dims[0] < 0)
+    {
+        int32_t dim = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions()) + options->squeeze_dims[0];
+        squeezeDim.push_back(static_cast<uint32_t>(dim));
+    }
+    else
+    {
+        for (int32_t i = 0; i < options->num_squeeze_dims; ++i)
+        {
+            squeezeDim.push_back(static_cast<uint32_t>(options->squeeze_dims[i]));
+        }
+    }
+
+    armnn::TensorInfo outputTensorInfo = OutputShapeOfSqueeze(squeezeDim, inputTensorInfo);
+
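+    // As in the classic delegate, Squeeze is mapped to a Reshape layer using the squeezed output shape.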
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("SQUEEZE",
+                                          tfLiteContext,
+                                          IsReshapeSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outInfo,
+                                          reshapeDesc);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect any constant inputs.
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
+TfLiteStatus VisitExpandDimsOperator(DelegateData& delegateData,
+                                     TfLiteOpaqueContext* tfLiteContext,
+                                     TfLiteOpaqueNode* tfLiteNode,
+                                     int nodeIndex,
+                                     int32_t operatorCode)
+{
+    TF_LITE_ENSURE_STATUS(ValidateNumInputs(tfLiteContext, tfLiteNode, 2, nodeIndex));
+    TF_LITE_ENSURE_STATUS(ValidateNumOutputs(tfLiteContext, tfLiteNode, 1, nodeIndex));
+
+    // Gather input indices and use to get input tensor.
+    int numInputs = 0;
+    const int* inputTensors;
+    if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather input tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteInputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const TfLiteOpaqueTensor* tfLiteAxisTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[1]);
+    if (!IsValid(tfLiteContext, tfLiteAxisTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    // Gather output indices and use to get output tensors.
+    int numOutputs = 0;
+    const int* outputTensors;
+    if (TfLiteOpaqueNodeOutputs(tfLiteNode, &outputTensors, &numOutputs) != kTfLiteOk)
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Unable to gather output tensor indices from node #%d: ",
+                nodeIndex);
+        return kTfLiteError;
+    }
+
+    TfLiteOpaqueTensor* tfLiteOutputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, outputTensors[0]);
+    if (!IsValid(tfLiteContext, tfLiteOutputTensor, operatorCode, nodeIndex))
+    {
+        return kTfLiteError;
+    }
+
+    const armnn::TensorInfo& inputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteInputTensor);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfoForTfLiteOpaqueTensor(tfLiteOutputTensor);
+
+    auto* axisTensorData = static_cast<int32_t*>(TfLiteOpaqueTensorData(tfLiteAxisTensor));
+    int32_t axis = axisTensorData[0];
+
+    int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
+    if (axis > inputDimSize || axis < 0 - (inputDimSize + 1))
+    {
+        TF_LITE_OPAQUE_MAYBE_KERNEL_LOG(
+                tfLiteContext,
+                "TfLiteArmnnOpaqueDelegate: Axis must be in range "
+                "[0 - (inputDimSize + 1), inputDimSize] inclusive.");
+        return kTfLiteError;
+    }
+
+    if (axis < 0)
+    {
+        axis = inputDimSize + axis + 1;
+    }
+
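+    // Build the expanded shape by copying the input dimensions and inserting a 1 at the resolved axis.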
+    std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1);
+    unsigned int inputShapeIndex = 0;
+    for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
+    {
+        if (i == static_cast<unsigned int>(axis))
+        {
+            shape[i] = 1;
+        }
+        else
+        {
+            shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
+            ++inputShapeIndex;
+        }
+    }
+
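+    // As in the classic delegate, ExpandDims is mapped to a Reshape layer targeting the expanded shape.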
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = armnn::TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape.data());
+
+    bool isSupported = false;
+    armnn::BackendId setBackend;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_OPAQUE_SUPPORT_FUNC("EXPAND_DIMS",
+                                          tfLiteContext,
+                                          IsReshapeSupported,
+                                          delegateData.m_Backends,
+                                          isSupported,
+                                          setBackend,
+                                          inputTensorInfo,
+                                          outInfo,
+                                          reshapeDesc);
+    };
+
+    if (!delegateData.m_Network)
+    {
+        validateFunc(outputTensorInfo, isSupported);
+        return isSupported ? kTfLiteOk : kTfLiteError;
+    }
+
+    armnn::IConnectableLayer* layer = delegateData.m_Network->AddReshapeLayer(reshapeDesc);
+    ARMNN_ASSERT(layer != nullptr);
+    layer->SetBackendId(setBackend);
+
+    armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+    outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
+    outputSlot.SetTensorInfo(outputTensorInfo);
+
+    // Try to connect any constant inputs.
+    if (ProcessInputs(layer, delegateData, tfLiteContext, tfLiteNode) != kTfLiteOk)
+    {
+        return kTfLiteError;
+    }
+
+    // Connect
+    return Connect(layer, tfLiteContext, tfLiteNode, delegateData);
+}
+
 }
diff --git a/delegate/opaque/src/armnn_delegate.cpp b/delegate/opaque/src/armnn_delegate.cpp
index f7476d1..cae1ea5 100644
--- a/delegate/opaque/src/armnn_delegate.cpp
+++ b/delegate/opaque/src/armnn_delegate.cpp
@@ -764,6 +764,12 @@
                                                  nodeIndex,
                                                  kTfLiteBuiltinExp,
                                                  armnn::UnaryOperation::Exp);
+        case kTfLiteBuiltinExpandDims:
+            return VisitExpandDimsOperator(delegateData,
+                                           tfLiteContext,
+                                           tfLiteNode,
+                                           nodeIndex,
+                                           kTfLiteBuiltinExpandDims);
         case kTfLiteBuiltinFloor:
             return VisitFloorOperator(delegateData,
                                       tfLiteContext,
@@ -1089,6 +1095,12 @@
                                                  nodeIndex,
                                                  kTfLiteBuiltinSqrt,
                                                  armnn::UnaryOperation::Sqrt);
+        case kTfLiteBuiltinSqueeze:
+            return VisitSqueezeOperator(delegateData,
+                                        tfLiteContext,
+                                        tfLiteNode,
+                                        nodeIndex,
+                                        kTfLiteBuiltinSqueeze);
         case kTfLiteBuiltinStridedSlice:
             return VisitStridedSliceOperator(delegateData,
                                              tfLiteContext,
diff --git a/delegate/test/ExpandDimsTest.cpp b/delegate/test/ExpandDimsTest.cpp
new file mode 100644
index 0000000..8c21f73
--- /dev/null
+++ b/delegate/test/ExpandDimsTest.cpp
@@ -0,0 +1,102 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RedefineTestHelper.hpp"
+
+namespace armnnDelegate
+{
+
+void ExpandDimsSimpleTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 2, 2, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> axis { 0 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4 };
+    std::vector<float> expectedOutputValues = { 1, 2, 3, 4 };
+
+    RedefineTest<float>(tflite::BuiltinOperator_EXPAND_DIMS,
+                        ::tflite::TensorType_FLOAT32,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        axis);
+}
+
+void ExpandDimsWithNegativeAxisTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 1, 2, 2 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> axis { -1 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4 };
+    std::vector<float> expectedOutputValues = { 1, 2, 3, 4 };
+
+    RedefineTest<float>(tflite::BuiltinOperator_EXPAND_DIMS,
+                        ::tflite::TensorType_FLOAT32,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        axis);
+}
+
+TEST_SUITE("ExpandDims_GpuAccTests")
+{
+
+TEST_CASE ("ExpandDims_Simple_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ExpandDimsSimpleTest(backends);
+}
+
+TEST_CASE ("ExpandDims_With_Negative_Axis_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ExpandDimsWithNegativeAxisTest(backends);
+}
+
+} // TEST_SUITE("ExpandDims_GpuAccTests")
+
+TEST_SUITE("ExpandDims_CpuAccTests")
+{
+
+TEST_CASE ("ExpandDims_Simple_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ExpandDimsSimpleTest(backends);
+}
+
+TEST_CASE ("ExpandDims_With_Negative_Axis_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ExpandDimsWithNegativeAxisTest(backends);
+}
+
+} // TEST_SUITE("ExpandDims_CpuAccTests")
+
+TEST_SUITE("ExpandDims_CpuRefTests")
+{
+
+TEST_CASE ("ExpandDims_Simple_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ExpandDimsSimpleTest(backends);
+}
+
+TEST_CASE ("ExpandDims_With_Negative_Axis_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ExpandDimsWithNegativeAxisTest(backends);
+}
+
+} // TEST_SUITE("ExpandDims_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/RedefineTestHelper.hpp b/delegate/test/RedefineTestHelper.hpp
index 80631cc..af9b446 100644
--- a/delegate/test/RedefineTestHelper.hpp
+++ b/delegate/test/RedefineTestHelper.hpp
@@ -21,7 +21,7 @@
 namespace
 {
 
-std::vector<char> CreateRedefineTfLiteModel(
+std::vector<char> CreateReshapeTfLiteModel(
         tflite::BuiltinOperator redefineOperatorCode,
         tflite::TensorType tensorType,
         const std::vector<int32_t>& inputTensorShape,
@@ -141,6 +141,127 @@
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
 
+std::vector<char> CreateRedefineTfLiteModel(
+        tflite::BuiltinOperator redefineOperatorCode,
+        tflite::TensorType tensorType,
+        const std::vector<int32_t>& inputTensorShape,
+        const std::vector<int32_t>& outputTensorShape,
+        const std::vector<int32_t>& squeezeOrAxisData,
+        float quantScale = 1.0f,
+        int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto inputTensor = CreateTensor(flatBufferBuilder,
+                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                            inputTensorShape.size()),
+                                    tensorType,
+                                    1,
+                                    flatBufferBuilder.CreateString("input"),
+                                    quantizationParameters);
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors;
+    std::vector<int32_t> operatorInputs;
+    std::vector<int> subgraphInputs;
+    flatbuffers::Offset<void> operatorBuiltinOptions;
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_SqueezeOptions;
+
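+    // SQUEEZE passes its dims through SqueezeOptions, while EXPAND_DIMS supplies the axis as a second constant input tensor.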
+    if (redefineOperatorCode == tflite::BuiltinOperator_SQUEEZE)
+    {
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        auto outputTensor = CreateTensor(flatBufferBuilder,
+                                         flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                                 outputTensorShape.size()),
+                                         tensorType,
+                                         2,
+                                         flatBufferBuilder.CreateString("output"),
+                                         quantizationParameters);
+        tensors = { inputTensor, outputTensor};
+        operatorInputs = {0};
+        subgraphInputs = {0};
+        operatorBuiltinOptions =
+                CreateSqueezeOptions(flatBufferBuilder,
+                                     flatBufferBuilder.CreateVector(squeezeOrAxisData.data(),
+                                                                    squeezeOrAxisData.size())).Union();
+
+        operatorBuiltinOptionsType = BuiltinOptions_SqueezeOptions;
+    }
+    else if (redefineOperatorCode == tflite::BuiltinOperator_EXPAND_DIMS)
+    {
+        buffers.push_back(
+                CreateBuffer(flatBufferBuilder,
+                             flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(squeezeOrAxisData.data()),
+                                                            sizeof(int32_t) * squeezeOrAxisData.size())));
+        auto shapeTensor = CreateTensor(flatBufferBuilder,
+                                        flatBufferBuilder.CreateVector<int32_t>( { 1 } ),
+                                        tflite::TensorType_INT32,
+                                        2,
+                                        flatBufferBuilder.CreateString("axis"));
+
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        auto outputTensor = CreateTensor(flatBufferBuilder,
+                                         flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                                 outputTensorShape.size()),
+                                         tensorType,
+                                         3,
+                                         flatBufferBuilder.CreateString("output"),
+                                         quantizationParameters);
+
+        tensors = { inputTensor, outputTensor, shapeTensor };
+        operatorInputs = {0, 2};
+        subgraphInputs = {0, 2};
+        operatorBuiltinOptions = CreateExpandDimsOptions(flatBufferBuilder).Union();
+
+        operatorBuiltinOptionsType = BuiltinOptions_ExpandDimsOptions;
+    }
+
+    const std::vector<int32_t> operatorOutputs{1};
+    flatbuffers::Offset <Operator> redefineOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphOutputs{1};
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&redefineOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: Redefine Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         redefineOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
 template <typename T>
 void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
                   tflite::TensorType tensorType,
@@ -149,20 +270,45 @@
                   std::vector<int32_t>& outputShape,
                   std::vector<T>& inputValues,
                   std::vector<T>& expectedOutputValues,
-                  std::vector<int32_t>& targetShape,
+                  std::vector<int32_t>& additionalData,
                   bool useOption = true,
                   float quantScale = 1.0f,
                   int quantOffset  = 0)
 {
     using namespace delegateTestInterpreter;
-    std::vector<char> modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
-                                                              tensorType,
-                                                              inputShape,
-                                                              outputShape,
-                                                              targetShape,
-                                                              useOption,
-                                                              quantScale,
-                                                              quantOffset);
+
+    std::vector<char> modelBuffer;
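+    // RESHAPE keeps using the original builder (now CreateReshapeTfLiteModel); SQUEEZE and EXPAND_DIMS use the new CreateRedefineTfLiteModel.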
+    if (redefineOperatorCode == tflite::BuiltinOperator_EXPAND_DIMS)
+    {
+        modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
+                                                tensorType,
+                                                inputShape,
+                                                outputShape,
+                                                additionalData,
+                                                quantScale,
+                                                quantOffset);
+    }
+    else if (redefineOperatorCode == tflite::BuiltinOperator_RESHAPE)
+    {
+        modelBuffer = CreateReshapeTfLiteModel(redefineOperatorCode,
+                                               tensorType,
+                                               inputShape,
+                                               outputShape,
+                                               additionalData,
+                                               useOption,
+                                               quantScale,
+                                               quantOffset);
+    }
+    else if (redefineOperatorCode == tflite::BuiltinOperator_SQUEEZE)
+    {
+        modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
+                                                tensorType,
+                                                inputShape,
+                                                outputShape,
+                                                additionalData,
+                                                quantScale,
+                                                quantOffset);
+    }
 
     // Setup interpreter with just TFLite Runtime.
     auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
diff --git a/delegate/test/SqueezeTest.cpp b/delegate/test/SqueezeTest.cpp
new file mode 100644
index 0000000..01122c9
--- /dev/null
+++ b/delegate/test/SqueezeTest.cpp
@@ -0,0 +1,102 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RedefineTestHelper.hpp"
+
+namespace armnnDelegate
+{
+
+void SqueezeSimpleTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 1, 2, 2, 1 };
+    std::vector<int32_t> outputShape { 2, 2 };
+    std::vector<int32_t> squeezeDims { };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4 };
+    std::vector<float> expectedOutputValues = { 1, 2, 3, 4 };
+
+    RedefineTest<float>(tflite::BuiltinOperator_SQUEEZE,
+                        ::tflite::TensorType_FLOAT32,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        squeezeDims);
+}
+
+void SqueezeWithDimsTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 1, 2, 2, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2 };
+    std::vector<int32_t> squeezeDims { -1 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4 };
+    std::vector<float> expectedOutputValues = { 1, 2, 3, 4 };
+
+    RedefineTest<float>(tflite::BuiltinOperator_SQUEEZE,
+                        ::tflite::TensorType_FLOAT32,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        squeezeDims);
+}
+
+TEST_SUITE("Squeeze_GpuAccTests")
+{
+
+TEST_CASE ("Squeeze_Simple_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    SqueezeSimpleTest(backends);
+}
+
+TEST_CASE ("Squeeze_With_Dims_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    SqueezeWithDimsTest(backends);
+}
+
+} // TEST_SUITE("Squeeze_GpuAccTests")
+
+TEST_SUITE("Squeeze_CpuAccTests")
+{
+
+TEST_CASE ("Squeeze_Simple_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    SqueezeSimpleTest(backends);
+}
+
+TEST_CASE ("Squeeze_With_Dims_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    SqueezeWithDimsTest(backends);
+}
+
+} // TEST_SUITE("Squeeze_CpuAccTests")
+
+TEST_SUITE("Squeeze_CpuRefTests")
+{
+
+TEST_CASE ("Squeeze_Simple_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    SqueezeSimpleTest(backends);
+}
+
+TEST_CASE ("Squeeze_With_Dims_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    SqueezeWithDimsTest(backends);
+}
+
+} // TEST_SUITE("Squeeze_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file