IVGCVSW-7555 Restructure Delegate

* New folders created:
  * common is for common code that does not use the TfLite API
  * classic is for the existing delegate implementation
  * opaque is for the new opaque delegate implementation
  * tests is for tests shared between the existing Delegate and the Opaque Delegate, with test utils that select which delegate to use
* The existing delegate is built as libarmnnDelegate.so and the opaque delegate as libarmnnOpaqueDelegate.so (see the usage sketch below)
* The opaque delegate structure is introduced, but no API is added yet
* CMakeLists.txt and delegate/CMakeLists.txt have been modified, and two new CMakeLists.txt files added
* Rename BUILD_ARMNN_TFLITE_DELEGATE to BUILD_CLASSIC_DELEGATE
* Rename BUILD_ARMNN_TFLITE_OPAQUE_DELEGATE to BUILD_OPAQUE_DELEGATE
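
For reference, the classic delegate's public API is unchanged by this
restructure; a minimal usage sketch, mirroring the tests added below
(backend choice and the pre-built 'interpreter' are illustrative):

    // Attach the classic ArmNN delegate to an existing tflite::Interpreter.
    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    armnnDelegate::DelegateOptions delegateOptions(backends);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    // 'interpreter' is an already-built tflite::Interpreter (illustrative).
    interpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());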

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Ib682b9ad0ac8d8acdc4ec6d9099bb0008a9fe8ed
diff --git a/delegate/test/ActivationTest.cpp b/delegate/test/ActivationTest.cpp
new file mode 100644
index 0000000..8f2f198
--- /dev/null
+++ b/delegate/test/ActivationTest.cpp
@@ -0,0 +1,296 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ActivationTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <schema_generated.h>
+
+#include <algorithm>
+#include <cmath>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void ActivationReLuTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+            -0.1f, -0.2f, -0.3f, -0.4f,
+            0.1f,  0.2f,  0.3f,  0.4f,
+            -1.0f, -2.0f, -3.0f, -4.0f,
+            1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate output values for input.
+    auto f = [](float value)
+    {
+        return std::fmax(0.0f, value);
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_RELU,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+void ActivationBoundedReluTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+            -0.1f, -0.2f, -0.3f, -0.4f,
+            0.1f,  0.2f,  0.3f,  0.4f,
+            -1.0f, -2.0f, -3.0f, -4.0f,
+            1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    const float a = 6.0f;
+    const float b = 0.0f;
+    // Calculate output values for input.
+    auto f = [a, b](float value)
+    {
+        return std::min(a, std::max(b, value));
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_RELU6,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+void ActivationSigmoidTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+            -0.1f, -0.2f, -0.3f, -0.4f,
+            0.1f,  0.2f,  0.3f,  0.4f,
+            -1.0f, -2.0f, -3.0f, -4.0f,
+            1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate output values for input.
+    auto f = [](float value)
+    {
+        return 1.0f / (1.0f + std::exp(-value));
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_LOGISTIC,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+void ActivationTanHTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+            -0.1f, -0.2f, -0.3f, -0.4f,
+            0.1f,  0.2f,  0.3f,  0.4f,
+            -1.0f, -2.0f, -3.0f, -4.0f,
+            1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate output values for input.
+    auto f = [](float value)
+    {
+        return tanhf(value);
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_TANH,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+void ActivationEluTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+            -0.1f, -0.2f, -0.3f, -0.4f,
+            0.1f,  0.2f,  0.3f,  0.4f,
+            -1.0f, -2.0f, -3.0f, -4.0f,
+            1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate output values for input.
+    auto f = [](float value)
+    {
+        if (value < 0)
+        {
+            // alpha * (exp(x) - 1)
+            return 1 * (std::exp(value) - 1);
+        }
+        return value;
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_ELU,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+void ActivationHardSwishTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<float> inputData = {
+            -0.1f, -0.2f, -0.3f, -0.4f,
+            0.1f,  0.2f,  0.3f,  0.4f,
+            -1.0f, -2.0f, -3.0f, -4.0f,
+            1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate output values for input.
+    auto f = [](float x)
+    {
+        // Break down the calculation to help with verification.
+        // hard_swish(x) = x * relu6(x+3) / 6
+        // relu6(x) = min(max(x,0),6)
+        float reLu6_step1 = std::max((x + 3),0.0f);
+        float reLu6Complete = std::min(reLu6_step1, 6.0f);
+        float hardSwish_step1 = x * reLu6Complete;
+        float result = hardSwish_step1 / 6;
+        return result;
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    ActivationTest(tflite::BuiltinOperator_HARD_SWISH,
+                   backends,
+                   inputData,
+                   outputExpectedData);
+}
+
+TEST_SUITE("Activation_CpuRefTests")
+{
+
+TEST_CASE ("Activation_ReLu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationReLuTest(backends);
+}
+
+TEST_CASE ("Activation_Bounded_Relu6_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationBoundedReluTest(backends);
+}
+
+TEST_CASE ("Activation_Sigmoid_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationSigmoidTest(backends);
+}
+
+TEST_CASE ("Activation_TanH_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationTanHTest(backends);
+}
+
+TEST_CASE ("Activation_Elu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationEluTest(backends);
+}
+
+TEST_CASE ("Activation_HardSwish_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ActivationHardSwishTest(backends);
+}
+
+}
+
+TEST_SUITE("Activation_CpuAccTests")
+{
+
+TEST_CASE ("Activation_ReLu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationReLuTest(backends);
+}
+
+TEST_CASE ("Activation_Bounded_Relu6_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationBoundedReluTest(backends);
+}
+
+TEST_CASE ("Activation_Sigmoid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationSigmoidTest(backends);
+}
+
+TEST_CASE ("Activation_TanH_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationTanHTest(backends);
+}
+
+TEST_CASE ("Activation_Elu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationEluTest(backends);
+}
+
+TEST_CASE ("Activation_HardSwish_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ActivationHardSwishTest(backends);
+}
+
+}
+
+TEST_SUITE("Activation_GpuAccTests")
+{
+
+TEST_CASE ("Activation_ReLu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationReLuTest(backends);
+}
+
+TEST_CASE ("Activation_Bounded_Relu6_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationBoundedReluTest(backends);
+}
+
+TEST_CASE ("Activation_Sigmoid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationSigmoidTest(backends);
+}
+
+TEST_CASE ("Activation_TanH_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationTanHTest(backends);
+}
+
+TEST_CASE ("Activation_Elu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationEluTest(backends);
+}
+
+TEST_CASE ("Activation_HardSwish_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ActivationHardSwishTest(backends);
+}
+
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ActivationTestHelper.hpp b/delegate/test/ActivationTestHelper.hpp
new file mode 100644
index 0000000..110c684
--- /dev/null
+++ b/delegate/test/ActivationTestHelper.hpp
@@ -0,0 +1,130 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <array>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateActivationTfLiteModel(tflite::BuiltinOperator activationOperatorCode,
+                                              tflite::TensorType tensorType,
+                                              const std::vector <int32_t>& tensorShape)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
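+    // Buffer 0 is the empty sentinel buffer the TfLite schema requires; both
+    // tensors reference it implicitly since they carry no constant data.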
+    std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder);
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
+                              tensorType);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
+                              tensorType);
+
+    // create operator
+    const std::vector<int> operatorInputs{0};
+    const std::vector<int> operatorOutputs{1};
+    flatbuffers::Offset <Operator> unaryOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
+
+    const std::vector<int> subgraphInputs{0};
+    const std::vector<int> subgraphOutputs{1};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&unaryOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Activation Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, activationOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void ActivationTest(tflite::BuiltinOperator activationOperatorCode,
+                    std::vector<armnn::BackendId>& backends,
+                    std::vector<float>& inputValues,
+                    std::vector<float>& expectedOutputValues)
+{
+    using namespace tflite;
+    std::vector<int32_t> inputShape { 4, 1, 4 };
+    std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
+                                                                ::tflite::TensorType_FLOAT32,
+                                                                inputShape);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Build two interpreters from the same model: one will be modified to run
+    // through the ArmNN delegate, the other stays on the reference TfLite kernels.
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<float>(tfLiteInterpreter,
+                                            armnnDelegateInterpreter,
+                                            inputShape,
+                                            expectedOutputValues);
+
+    tfLiteInterpreter.reset(nullptr);
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ArgMinMaxTest.cpp b/delegate/test/ArgMinMaxTest.cpp
new file mode 100644
index 0000000..1c05503
--- /dev/null
+++ b/delegate/test/ArgMinMaxTest.cpp
@@ -0,0 +1,174 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ArgMinMaxTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void ArgMaxFP32Test(std::vector<armnn::BackendId>& backends, int axisValue)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 2, 4 };
+    std::vector<int32_t> outputShape { 1, 3, 4 };
+    std::vector<int32_t> axisShape { 1 };
+
+    std::vector<float> inputValues = { 1.0f,   2.0f,   3.0f,   4.0f,
+                                       5.0f,   6.0f,   7.0f,   8.0f,
+
+                                       10.0f,  20.0f,  30.0f,  40.0f,
+                                       50.0f,  60.0f,  70.0f,  80.0f,
+
+                                       100.0f, 200.0f, 300.0f, 400.0f,
+                                       500.0f, 600.0f, 700.0f, 800.0f };
+
+    std::vector<int32_t> expectedOutputValues = { 1, 1, 1, 1,
+                                                  1, 1, 1, 1,
+                                                  1, 1, 1, 1 };
+
+    ArgMinMaxTest<float, int32_t>(tflite::BuiltinOperator_ARG_MAX,
+                                  ::tflite::TensorType_FLOAT32,
+                                  backends,
+                                  inputShape,
+                                  axisShape,
+                                  outputShape,
+                                  inputValues,
+                                  expectedOutputValues,
+                                  axisValue,
+                                  ::tflite::TensorType_INT32);
+}
+
+void ArgMinFP32Test(std::vector<armnn::BackendId>& backends, int axisValue)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 2, 4 };
+    std::vector<int32_t> outputShape { 1, 3, 2 };
+    std::vector<int32_t> axisShape { 1 };
+
+    std::vector<float> inputValues = { 1.0f,   2.0f,   3.0f,   4.0f,
+                                       5.0f,   6.0f,   7.0f,   8.0f,
+
+                                       10.0f,  20.0f,  30.0f,  40.0f,
+                                       50.0f,  60.0f,  70.0f,  80.0f,
+
+                                       100.0f, 200.0f, 300.0f, 400.0f,
+                                       500.0f, 600.0f, 700.0f, 800.0f };
+
+    std::vector<int32_t> expectedOutputValues = { 0, 0,
+                                                  0, 0,
+                                                  0, 0 };
+
+    ArgMinMaxTest<float, int32_t>(tflite::BuiltinOperator_ARG_MIN,
+                                  ::tflite::TensorType_FLOAT32,
+                                  backends,
+                                  inputShape,
+                                  axisShape,
+                                  outputShape,
+                                  inputValues,
+                                  expectedOutputValues,
+                                  axisValue,
+                                  ::tflite::TensorType_INT32);
+}
+
+void ArgMaxUint8Test(std::vector<armnn::BackendId>& backends, int axisValue)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 1, 1, 5 };
+    std::vector<int32_t> outputShape { 1, 1, 1 };
+    std::vector<int32_t> axisShape { 1 };
+
+    std::vector<uint8_t> inputValues = { 5, 2, 8, 10, 9 };
+
+    std::vector<int32_t> expectedOutputValues = { 3 };
+
+    ArgMinMaxTest<uint8_t, int32_t>(tflite::BuiltinOperator_ARG_MAX,
+                                    ::tflite::TensorType_UINT8,
+                                    backends,
+                                    inputShape,
+                                    axisShape,
+                                    outputShape,
+                                    inputValues,
+                                    expectedOutputValues,
+                                    axisValue,
+                                    ::tflite::TensorType_INT32);
+}
+
+TEST_SUITE("ArgMinMax_CpuRefTests")
+{
+
+TEST_CASE ("ArgMaxFP32Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ArgMaxFP32Test(backends, 2);
+}
+
+TEST_CASE ("ArgMinFP32Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ArgMinFP32Test(backends, 3);
+}
+
+TEST_CASE ("ArgMaxUint8Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ArgMaxUint8Test(backends, -1);
+}
+
+} // TEST_SUITE("ArgMinMax_CpuRefTests")
+
+TEST_SUITE("ArgMinMax_CpuAccTests")
+{
+
+TEST_CASE ("ArgMaxFP32Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ArgMaxFP32Test(backends, 2);
+}
+
+TEST_CASE ("ArgMinFP32Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ArgMinFP32Test(backends, 3);
+}
+
+TEST_CASE ("ArgMaxUint8Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ArgMaxUint8Test(backends, -1);
+}
+
+} // TEST_SUITE("ArgMinMax_CpuAccTests")
+
+TEST_SUITE("ArgMinMax_GpuAccTests")
+{
+
+TEST_CASE ("ArgMaxFP32Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ArgMaxFP32Test(backends, 2);
+}
+
+TEST_CASE ("ArgMinFP32Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ArgMinFP32Test(backends, 3);
+}
+
+TEST_CASE ("ArgMaxUint8Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ArgMaxUint8Test(backends, -1);
+}
+
+} // TEST_SUITE("ArgMinMax_GpuAccTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ArgMinMaxTestHelper.hpp b/delegate/test/ArgMinMaxTestHelper.hpp
new file mode 100644
index 0000000..91cf1f8
--- /dev/null
+++ b/delegate/test/ArgMinMaxTestHelper.hpp
@@ -0,0 +1,199 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename InputT, typename OutputT>
+std::vector<char> CreateArgMinMaxTfLiteModel(tflite::BuiltinOperator argMinMaxOperatorCode,
+                                             tflite::TensorType tensorType,
+                                             const std::vector<int32_t>& inputTensorShape,
+                                             const std::vector<int32_t>& axisTensorShape,
+                                             const std::vector<int32_t>& outputTensorShape,
+                                             const std::vector<OutputT> axisValue,
+                                             tflite::TensorType outputType,
+                                             float quantScale = 1.0f,
+                                             int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto inputTensor = CreateTensor(flatBufferBuilder,
+                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                            inputTensorShape.size()),
+                                    tensorType,
+                                    1,
+                                    flatBufferBuilder.CreateString("input"),
+                                    quantizationParameters);
+
+    auto axisTensor = CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
+                                                                           axisTensorShape.size()),
+                                   tflite::TensorType_INT32,
+                                   2,
+                                   flatBufferBuilder.CreateString("axis"));
+
+    auto outputTensor = CreateTensor(flatBufferBuilder,
+                                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                             outputTensorShape.size()),
+                                     outputType,
+                                     3,
+                                     flatBufferBuilder.CreateString("output"),
+                                     quantizationParameters);
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, axisTensor, outputTensor };
+
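+    // Buffer layout: index 0 is the schema's empty sentinel; 1 and 3 back the
+    // non-constant input and output tensors; 2 holds the constant axis value.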
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisValue.data()),
+                                                    sizeof(OutputT))));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    std::vector<int32_t> operatorInputs = { 0, 1 };
+    std::vector<int> subgraphInputs = { 0, 1 };
+
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_ArgMaxOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateArgMaxOptions(flatBufferBuilder, outputType).Union();
+
+    if (argMinMaxOperatorCode == tflite::BuiltinOperator_ARG_MIN)
+    {
+        operatorBuiltinOptionsType = BuiltinOptions_ArgMinOptions;
+        operatorBuiltinOptions = CreateArgMinOptions(flatBufferBuilder, outputType).Union();
+    }
+
+    // create operator
+    const std::vector<int32_t> operatorOutputs{ 2 };
+    flatbuffers::Offset <Operator> argMinMaxOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphOutputs{ 2 };
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&argMinMaxOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: ArgMinMax Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         argMinMaxOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename InputT, typename OutputT>
+void ArgMinMaxTest(tflite::BuiltinOperator argMinMaxOperatorCode,
+                   tflite::TensorType tensorType,
+                   const std::vector<armnn::BackendId>& backends,
+                   const std::vector<int32_t>& inputShape,
+                   const std::vector<int32_t>& axisShape,
+                   std::vector<int32_t>& outputShape,
+                   std::vector<InputT>& inputValues,
+                   std::vector<OutputT>& expectedOutputValues,
+                   OutputT axisValue,
+                   tflite::TensorType outputType,
+                   float quantScale = 1.0f,
+                   int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateArgMinMaxTfLiteModel<InputT, OutputT>(argMinMaxOperatorCode,
+                                                                                tensorType,
+                                                                                inputShape,
+                                                                                axisShape,
+                                                                                outputShape,
+                                                                                {axisValue},
+                                                                                outputType,
+                                                                                quantScale,
+                                                                                quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+          (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+          (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<InputT>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<InputT>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<OutputT>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<OutputT>(armnnDelegateOutputId);
+
+    for (size_t i = 0; i < expectedOutputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
+        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
+        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+    }
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ArmnnDelegateTest.cpp b/delegate/test/ArmnnDelegateTest.cpp
new file mode 100644
index 0000000..c23c702
--- /dev/null
+++ b/delegate/test/ArmnnDelegateTest.cpp
@@ -0,0 +1,93 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
+#include <doctest/doctest.h>
+
+#include <armnn_delegate.hpp>
+
+#include <tensorflow/lite/kernels/builtin_op_kernels.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+
+namespace armnnDelegate
+{
+
+TEST_SUITE("ArmnnDelegate")
+{
+
+TEST_CASE ("ArmnnDelegate Registered")
+{
+    using namespace tflite;
+    auto tfLiteInterpreter = std::make_unique<Interpreter>();
+
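+    // Hand-build a minimal graph: two float inputs feeding a single ADD node.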
+    tfLiteInterpreter->AddTensors(3);
+    tfLiteInterpreter->SetInputs({0, 1});
+    tfLiteInterpreter->SetOutputs({2});
+
+    tfLiteInterpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input1", {1,2,2,1}, TfLiteQuantization());
+    tfLiteInterpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "input2", {1,2,2,1}, TfLiteQuantization());
+    tfLiteInterpreter->SetTensorParametersReadWrite(2, kTfLiteFloat32, "output", {1,2,2,1}, TfLiteQuantization());
+
+    tflite::ops::builtin::BuiltinOpResolver opResolver;
+    const TfLiteRegistration* opRegister = opResolver.FindOp(BuiltinOperator_ADD, 1);
+    tfLiteInterpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, nullptr, opRegister);
+
+    // Create the Armnn Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<armnn::BackendOptions> backendOptions;
+    backendOptions.emplace_back(
+        armnn::BackendOptions{ "BackendName",
+                               {
+                                  { "Option1", 42 },
+                                  { "Option2", true }
+                               }}
+    );
+
+    armnnDelegate::DelegateOptions delegateOptions(backends, backendOptions);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+                       theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                                        armnnDelegate::TfLiteArmnnDelegateDelete);
+
+    auto status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
+    CHECK(status == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+}
+
+TEST_CASE ("ArmnnDelegateOptimizerOptionsRegistered")
+{
+    using namespace tflite;
+    auto tfLiteInterpreter = std::make_unique<Interpreter>();
+
+    tfLiteInterpreter->AddTensors(3);
+    tfLiteInterpreter->SetInputs({0, 1});
+    tfLiteInterpreter->SetOutputs({2});
+
+    tfLiteInterpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "input1", {1,2,2,1}, TfLiteQuantization());
+    tfLiteInterpreter->SetTensorParametersReadWrite(1, kTfLiteFloat32, "input2", {1,2,2,1}, TfLiteQuantization());
+    tfLiteInterpreter->SetTensorParametersReadWrite(2, kTfLiteFloat32, "output", {1,2,2,1}, TfLiteQuantization());
+
+    tflite::ops::builtin::BuiltinOpResolver opResolver;
+    const TfLiteRegistration* opRegister = opResolver.FindOp(BuiltinOperator_ADD, 1);
+    tfLiteInterpreter->AddNodeWithParameters({0, 1}, {2}, "", 0, nullptr, opRegister);
+
+    // Create the Armnn Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+
+    armnn::OptimizerOptions optimizerOptions(true, true, false, true);
+
+    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+                       theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                                        armnnDelegate::TfLiteArmnnDelegateDelete);
+
+    auto status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
+    CHECK(status == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+}
+
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/test/BatchMatMulTest.cpp b/delegate/test/BatchMatMulTest.cpp
new file mode 100644
index 0000000..c6d7bc5
--- /dev/null
+++ b/delegate/test/BatchMatMulTest.cpp
@@ -0,0 +1,689 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchMatMulTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+    void BatchMatMul2DFp32SimpleTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 2, 2 };
+        std::vector<int32_t> RHSInputShape { 2, 2 };
+        std::vector<int32_t> outputShape   { 2, 2 };
+
+        std::vector<float> LHSInputValues = { 1, 2,
+                                              3, 4 };
+
+        std::vector<float> RHSInputValues = { 5, 6,
+                                              7, 8  };
+
+        std::vector<float> expectedOutputValues = { 19, 22,
+                                                    43, 50 };
+
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_FLOAT32,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+    void BatchMatMul2DInt8SimpleTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 2, 2 };
+        std::vector<int32_t> RHSInputShape { 2, 2 };
+        std::vector<int32_t> outputShape   { 2, 2 };
+
+        std::vector<int8_t> LHSInputValues = { 1, 2,
+                                              3, 4 };
+
+        std::vector<int8_t> RHSInputValues = { 5, 6,
+                                              7, 8  };
+
+        std::vector<int8_t> expectedOutputValues = { 19, 22,
+                                                    43, 50 };
+
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_INT8,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+
+    void BatchMatMul3DFp32SimpleTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 1,2,2 };
+        std::vector<int32_t> RHSInputShape { 1,2,2 };
+        std::vector<int32_t> outputShape   { 1,2,2 };
+
+        std::vector<float> LHSInputValues = { 1, 2,
+                                              3, 4 };
+
+        std::vector<float> RHSInputValues = { 5, 6,
+                                              7, 8  };
+
+        std::vector<float> expectedOutputValues = { 19, 22,
+                                                    43, 50 };
+
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_FLOAT32,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+
+    void BatchMatMul3DInt8SimpleTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 1,2,2 };
+        std::vector<int32_t> RHSInputShape { 1,2,2 };
+        std::vector<int32_t> outputShape   { 1,2,2 };
+
+        std::vector<int8_t> LHSInputValues = { 1, 2,
+                                              3, 4 };
+
+        std::vector<int8_t> RHSInputValues = { 5, 6,
+                                              7, 8  };
+
+        std::vector<int8_t> expectedOutputValues = { 19, 22,
+                                                    43, 50 };
+
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_INT8,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+
+    void BatchMatMul4DFp32SimpleTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 1,1,2,2 };
+        std::vector<int32_t> RHSInputShape { 1,1,2,2 };
+        std::vector<int32_t> outputShape   { 1,1,2,2 };
+
+        std::vector<float> LHSInputValues = { 1, 2,
+                                              3, 4 };
+
+        std::vector<float> RHSInputValues = { 5, 6,
+                                              7, 8  };
+
+        std::vector<float> expectedOutputValues = { 19, 22,
+                                                    43, 50 };
+
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_FLOAT32,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+
+    void BatchMatMul4DInt8SimpleTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 1,1,2,2};
+        std::vector<int32_t> RHSInputShape { 1,1,2,2 };
+        std::vector<int32_t> outputShape   { 1,1,2,2 };
+
+        std::vector<int8_t> LHSInputValues = { 1, 2,
+                                              3, 4 };
+
+        std::vector<int8_t> RHSInputValues = { 5, 6,
+                                              7, 8 };
+
+        std::vector<int8_t> expectedOutputValues = { 19, 22,
+                                                    43, 50 };
+
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_INT8,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+
+    void BatchMatMul3DFp32BatchTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 2,2,2 };
+        std::vector<int32_t> RHSInputShape { 2,2,2 };
+        std::vector<int32_t> outputShape   { 2,2,2 };
+
+        std::vector<float> LHSInputValues = { 1, 2,
+                                              3, 4,
+
+                                              9, 10,
+                                              11, 12 };
+
+        std::vector<float> RHSInputValues = { 5, 6,
+                                              7, 8,
+
+                                              13, 14,
+                                              15, 16 };
+
+        std::vector<float> expectedOutputValues = { 19, 22,
+                                                    43, 50,
+
+                                                    267, 286,
+                                                    323, 346 };
+
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_FLOAT32,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+
+    void BatchMatMul3DInt8BatchTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 2,2,2 };
+        std::vector<int32_t> RHSInputShape { 2,2,2 };
+        std::vector<int32_t> outputShape   { 2,2,2 };
+
+        std::vector<int8_t> LHSInputValues = { 1, 2,
+                                              3, 4,
+
+                                              9, 10,
+                                              11, 12 };
+
+        std::vector<int8_t> RHSInputValues = { 5, 6,
+                                              7, 8,
+
+                                              1, 2,
+                                              3, 4 };
+
+        std::vector<int8_t> expectedOutputValues = { 19, 22,
+                                                    43, 50,
+
+                                                    39, 58,
+                                                    47, 70 };
+
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_INT8,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+
+    void BatchMatMul3DFp32BroadcastTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 2,2,2 };
+        std::vector<int32_t> RHSInputShape { 2,2 };
+        std::vector<int32_t> outputShape   { 2,2,2 };
+
+        std::vector<float> LHSInputValues = { 1, 2,
+                                              3, 4,
+
+                                              9, 10,
+                                              11, 12 };
+
+        std::vector<float> RHSInputValues = { 13, 14,
+                                              15, 16 };
+
+        std::vector<float> expectedOutputValues = {  43, 46,
+                                                     99, 106,
+
+                                                     267, 286,
+                                                     323, 346 };
+
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_FLOAT32,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+
+    void BatchMatMul3DInt8BroadcastTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 2,2,2 };
+        std::vector<int32_t> RHSInputShape { 1,2,2 };
+        std::vector<int32_t> outputShape   { 2,2,2 };
+
+        std::vector<int8_t> LHSInputValues = { 1, 2,
+                                              3, 4,
+
+                                              9, 10,
+                                              11, 12 };
+
+        std::vector<int8_t> RHSInputValues = { 1, 2,
+                                               3, 4 };
+
+        std::vector<int8_t> expectedOutputValues = {  7,  10,
+                                                      15, 22,
+
+                                                      39, 58,
+                                                      47, 70 };
+
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_INT8,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+
+    void BatchMatMul3D2DFp32BroadcastTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 2,2,2 };
+        std::vector<int32_t> RHSInputShape { 2,2 };
+        std::vector<int32_t> outputShape   { 2,2,2 };
+
+        std::vector<float> LHSInputValues = { 1, 2,
+                                              3, 4,
+
+                                              9, 10,
+                                              11, 12 };
+
+        std::vector<float> RHSInputValues = { 13, 14,
+                                              15, 16 };
+
+        std::vector<float> expectedOutputValues = {  43, 46,
+                                                     99, 106,
+
+                                                     267, 286,
+                                                     323, 346 };
+
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_FLOAT32,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+
+    void BatchMatMul3D2DInt8BroadcastTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 2,2,2 };
+        std::vector<int32_t> RHSInputShape { 2,2 };
+        std::vector<int32_t> outputShape   { 2,2,2 };
+
+        std::vector<int8_t> LHSInputValues = { 1, 2,
+                                              3, 4,
+
+                                              9, 10,
+                                              11, 12 };
+
+        std::vector<int8_t> RHSInputValues = { 1, 2,
+                                               3, 4 };
+
+        std::vector<int8_t> expectedOutputValues = {  7, 10,
+                                                      15, 22,
+
+                                                      39, 58,
+                                                      47, 70 };
+
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_INT8,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+
+    void BatchMatMul2DFp32TinyTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 1,1 };
+        std::vector<int32_t> RHSInputShape { 1,1 };
+        std::vector<int32_t> outputShape   { 1,1 };
+
+        std::vector<float> LHSInputValues = { 3 };
+
+        std::vector<float> RHSInputValues = { 5 };
+
+        std::vector<float> expectedOutputValues = { 15 };
+
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_FLOAT32,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+    void BatchMatMul2DInt8TinyTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 1,1 };
+        std::vector<int32_t> RHSInputShape { 1,1 };
+        std::vector<int32_t> outputShape   { 1,1 };
+
+        std::vector<int8_t> LHSInputValues = { 3 };
+
+        std::vector<int8_t> RHSInputValues = { 5 };
+
+        std::vector<int8_t> expectedOutputValues = { 15 };
+
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
+                                ::tflite::TensorType_INT8,
+                                backends,
+                                LHSInputShape,
+                                RHSInputShape,
+                                outputShape,
+                                LHSInputValues,
+                                RHSInputValues,
+                                expectedOutputValues,
+                                false,
+                                false);
+    }
+
+    void BatchMatMulNonSquareFp32Test(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 2,5,3 };
+        std::vector<int32_t> RHSInputShape { 2,3,4 };
+        std::vector<int32_t> outputShape   { 2,5,4 };
+
+        std::vector<float> LHSInputValues = { 8, 8, 4,
+                                              6, 1, 3,
+                                              8, 8, 3,
+                                              8, 9, 8,
+                                              5, 4, 4,
+
+                                              1, 8, 5,
+                                              7, 1, 1,
+                                              8, 7, 9,
+                                              3, 2, 7,
+                                              8, 5, 3 };
+
+        std::vector<float> RHSInputValues = { 6, 2, 3, 2,
+                                              6, 2, 2, 8,
+                                              3, 7, 8, 1,
+
+                                              7, 2, 9, 5,
+                                              2, 3, 1, 3,
+                                              2, 7, 7, 5 };
+
+        std::vector<float> expectedOutputValues = { 108, 60, 72, 84,
+                                                    51, 35, 44, 23,
+                                                    105, 53, 64, 83,
+                                                    126, 90, 106, 96,
+                                                    66, 46, 55, 46,
+
+                                                    33, 61, 52, 54,
+                                                    53, 24, 71, 43,
+                                                    88, 100, 142, 106,
+                                                    39, 61, 78, 56,
+                                                    72, 52, 98, 70 };
+
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_FLOAT32,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               false,
+                               false);
+    }
+
+    void BatchMatMulNonSquareInt8Test(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 2,5,3 };
+        std::vector<int32_t> RHSInputShape { 2,3,4 };
+        std::vector<int32_t> outputShape   { 2,5,4 };
+
+        std::vector<int8_t> LHSInputValues = { 8, 8, 4,
+                                               6, 1, 3,
+                                               8, 8, 3,
+                                               8, 9, 8,
+                                               5, 4, 4,
+
+                                               1, 8, 5,
+                                               7, 1, 1,
+                                               8, 7, 9,
+                                               3, 2, 7,
+                                               8, 5, 3 };
+
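+        // Note: unlike the Fp32 variant above, the 9 in the second RHS batch is replaced by a 3 so that
+        // every accumulated output fits in int8 (the largest expected result below is 126).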
+        std::vector<int8_t> RHSInputValues = { 6, 2, 3, 2,
+                                               6, 2, 2, 8,
+                                               3, 7, 8, 1,
+
+                                               7, 2, 3, 5,
+                                               2, 3, 1, 3,
+                                               2, 7, 7, 5 };
+
+        std::vector<int8_t> expectedOutputValues = { 108, 60, 72, 84,
+                                                     51, 35, 44, 23,
+                                                     105, 53, 64, 83,
+                                                     126, 90, 106, 96,
+                                                     66, 46, 55, 46,
+
+                                                     33, 61, 46, 54,
+                                                     53, 24, 29, 43,
+                                                     88, 100, 94, 106,
+                                                     39, 61, 60, 56,
+                                                     72, 52, 50, 70 };
+
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
+                                ::tflite::TensorType_INT8,
+                                backends,
+                                LHSInputShape,
+                                RHSInputShape,
+                                outputShape,
+                                LHSInputValues,
+                                RHSInputValues,
+                                expectedOutputValues,
+                                false,
+                                false);
+    }
+
+    void BatchMatMul2DFp32SimpleAdjointTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 3,3 };
+        std::vector<int32_t> RHSInputShape { 3,3 };
+        std::vector<int32_t> outputShape   { 3,3 };
+
+        std::vector<float> LHSInputValues = { 3, 1, 1,
+                                              1, 3, -1,
+                                              2, 4, 1 };
+
+        std::vector<float> RHSInputValues = { 1, 0, 0,
+                                              0, 1, 0,
+                                              0, 0, 1 };
+
+        std::vector<float> expectedOutputValues = { 3, 1, 2,
+                                                    1, 3, 4,
+                                                    1, -1, 1 };
+
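+        // adjX == true transposes the LHS before multiplication, so LHS^T x Identity yields the transpose of LHS.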
+        BatchMatMulTest<float>(tflite::BuiltinOperator_BATCH_MATMUL,
+                               ::tflite::TensorType_FLOAT32,
+                               backends,
+                               LHSInputShape,
+                               RHSInputShape,
+                               outputShape,
+                               LHSInputValues,
+                               RHSInputValues,
+                               expectedOutputValues,
+                               true,
+                               false);
+    }
+
+    void BatchMatMul2DInt8SimpleAdjointTest(std::vector<armnn::BackendId>& backends)
+    {
+        // Set input data
+        std::vector<int32_t> LHSInputShape { 3,3 };
+        std::vector<int32_t> RHSInputShape { 3,3 };
+        std::vector<int32_t> outputShape   { 3,3 };
+
+        std::vector<int8_t> LHSInputValues = { 3, 1, 1,
+                                               1, 3, -1,
+                                               2, 4, 1 };
+
+        std::vector<int8_t> RHSInputValues = { 1, 0, 0,
+                                               0, 1, 0,
+                                               0, 0, 1 };
+
+        std::vector<int8_t> expectedOutputValues = { 3, 1, 2,
+                                                     1, 3, 4,
+                                                     1, -1, 1 };
+
+        BatchMatMulTest<int8_t>(tflite::BuiltinOperator_BATCH_MATMUL,
+                                ::tflite::TensorType_INT8,
+                                backends,
+                                LHSInputShape,
+                                RHSInputShape,
+                                outputShape,
+                                LHSInputValues,
+                                RHSInputValues,
+                                expectedOutputValues,
+                                true,
+                                false);
+    }
+
+    TEST_SUITE("BATCH_MATMUL_CpuRefTests")
+    {
+        TEST_CASE("BATCH_MATMUL_Fp32_CpuRefTests")
+        {
+            std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+            BatchMatMul2DFp32SimpleTest       (backends);
+            BatchMatMul3DFp32SimpleTest       (backends);
+            BatchMatMul4DFp32SimpleTest       (backends);
+            BatchMatMul3DFp32BatchTest        (backends);
+            BatchMatMul3DFp32BroadcastTest    (backends);
+            BatchMatMul3D2DFp32BroadcastTest  (backends);
+            BatchMatMul2DFp32TinyTest         (backends);
+            BatchMatMulNonSquareFp32Test      (backends);
+            BatchMatMul2DFp32SimpleAdjointTest(backends);
+        }
+
+        TEST_CASE("BATCH_MATMUL_Int8_CpuRefTests")
+        {
+            std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+            BatchMatMul2DInt8SimpleTest       (backends);
+            BatchMatMul3DInt8SimpleTest       (backends);
+            BatchMatMul4DInt8SimpleTest       (backends);
+            BatchMatMul3DInt8BatchTest        (backends);
+            BatchMatMul3DInt8BroadcastTest    (backends);
+            BatchMatMul3D2DInt8BroadcastTest  (backends);
+            BatchMatMul2DInt8TinyTest         (backends);
+            BatchMatMulNonSquareInt8Test      (backends);
+            BatchMatMul2DInt8SimpleAdjointTest(backends);
+        }
+    }
+
+    TEST_SUITE("BATCH_MATMUL_CpuAccTests")
+    {
+        TEST_CASE("BATCH_MATMUL_Fp32_CpuAccTests")
+        {
+            std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+            BatchMatMul2DFp32SimpleTest       (backends);
+            BatchMatMul3DFp32SimpleTest       (backends);
+            BatchMatMul4DFp32SimpleTest       (backends);
+            BatchMatMul3DFp32BatchTest        (backends);
+            BatchMatMul3DFp32BroadcastTest    (backends);
+            BatchMatMul3D2DFp32BroadcastTest  (backends);
+            BatchMatMul2DFp32TinyTest         (backends);
+            BatchMatMulNonSquareFp32Test      (backends);
+            BatchMatMul2DFp32SimpleAdjointTest(backends);
+        }
+    }
+    TEST_SUITE("BATCH_MATMUL_GpuAccTests")
+    {
+        TEST_CASE("BATCH_MATMUL_Fp32_GpuAccTests")
+        {
+            std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+            BatchMatMul2DFp32SimpleTest       (backends);
+            BatchMatMul3DFp32SimpleTest       (backends);
+            BatchMatMul4DFp32SimpleTest       (backends);
+            BatchMatMul3DFp32BatchTest        (backends);
+            BatchMatMul3DFp32BroadcastTest    (backends);
+            BatchMatMul3D2DFp32BroadcastTest  (backends);
+            BatchMatMul2DFp32TinyTest         (backends);
+            BatchMatMulNonSquareFp32Test      (backends);
+            BatchMatMul2DFp32SimpleAdjointTest(backends);
+        }
+    }
+}
diff --git a/delegate/test/BatchMatMulTestHelper.hpp b/delegate/test/BatchMatMulTestHelper.hpp
new file mode 100644
index 0000000..32b0a4f
--- /dev/null
+++ b/delegate/test/BatchMatMulTestHelper.hpp
@@ -0,0 +1,208 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateBatchMatMulTfLiteModel(
+        tflite::BuiltinOperator bmmOperatorCode,
+        tflite::TensorType tensorType,
+        const std::vector <int32_t>& LHSInputTensorShape,
+        const std::vector <int32_t>& RHSInputTensorShape,
+        const std::vector <int32_t>& outputTensorShape,
+        bool adjX = false,
+        bool adjY = false,
+        float quantScale = 1.0f,
+        int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
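+    // Buffer 0 is the empty sentinel buffer required by the TfLite schema; buffers 1-3 back the two inputs
+    // and the output.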
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
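+    // With the default quantScale of 1.0f and quantOffset of 0, quantisation is an identity mapping, so the
+    // integer tests can compare raw values directly.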
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(LHSInputTensorShape.data(),
+                                                                      LHSInputTensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("LHSInput"),
+                              quantizationParameters);
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(RHSInputTensorShape.data(),
+                                                                      RHSInputTensorShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("RHSInput"),
+                              quantizationParameters);
+
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              3,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // Create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_BatchMatMulOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateBatchMatMulOptions(flatBufferBuilder,
+                                                                                adjX,
+                                                                                adjY).Union();
+
+    const std::vector<int32_t> operatorInputs{{0, 1}};
+    const std::vector<int32_t> operatorOutputs{2};
+    flatbuffers::Offset <Operator> bmmOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+                                                                   operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{{0, 1}};
+    const std::vector<int> subgraphOutputs{2};
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
+                                                                   subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&bmmOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: BatchMatMul Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, bmmOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode,
+                     tflite::TensorType tensorType,
+                     std::vector<armnn::BackendId>& backends,
+                     std::vector<int32_t>& LHSInputShape,
+                     std::vector<int32_t>& RHSInputShape,
+                     std::vector<int32_t>& outputShape,
+                     std::vector<T>& LHSInputValues,
+                     std::vector<T>& RHSInputValues,
+                     std::vector<T>& expectedOutputValues,
+                     bool adjX = false,
+                     bool adjY = false,
+                     float quantScale = 1.0f,
+                     int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateBatchMatMulTfLiteModel(bmmOperatorCode,
+                                                                 tensorType,
+                                                                 LHSInputShape,
+                                                                 RHSInputShape,
+                                                                 outputShape,
+                                                                 adjX,
+                                                                 adjY,
+                                                                 quantScale,
+                                                                 quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
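+    // Two interpreters are built from the same model: one runs the reference TfLite kernels, the other is
+    // handed to the ArmNN delegate below, and their outputs are compared at the end.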
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateLHSInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateLHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateLHSInputId);
+    auto tfLiteDelegateRHSInputId = tfLiteInterpreter->inputs()[1];
+    auto tfLiteDelegateRHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateRHSInputId);
+    for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
+    {
+        tfLiteDelegateLHSInputData[i] = LHSInputValues[i];
+    }
+    for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
+    {
+        tfLiteDelegateRHSInputData[i] = RHSInputValues[i];
+    }
+
+    auto armnnDelegateLHSInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateLHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateLHSInputId);
+    auto armnnDelegateRHSInputId = armnnDelegateInterpreter->inputs()[1];
+    auto armnnDelegateRHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateRHSInputId);
+    for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
+    {
+        armnnDelegateLHSInputData[i] = LHSInputValues[i];
+    }
+    for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
+    {
+        armnnDelegateRHSInputData[i] = RHSInputValues[i];
+    }
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter,
+                                     outputShape, expectedOutputValues);
+}
+
+} // anonymous namespace
diff --git a/delegate/test/BatchSpaceTest.cpp b/delegate/test/BatchSpaceTest.cpp
new file mode 100644
index 0000000..f4c041d
--- /dev/null
+++ b/delegate/test/BatchSpaceTest.cpp
@@ -0,0 +1,299 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "BatchSpaceTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+// BatchToSpaceND Operator
+void BatchToSpaceNDFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 4, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f };
+    std::vector<float> expectedOutputValues { 1.0f, 2.0f, 3.0f, 4.0f };
+
+    std::vector<unsigned int> blockShape({2, 2});
+    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
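+    // A 2x2 block shape folds the 4 input batches into a single 2x2 spatial grid; with zero crops the
+    // values pass through unchanged.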
+
+    BatchSpaceTest<float>(tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          inputShape,
+                          expectedOutputShape,
+                          inputValues,
+                          blockShape,
+                          crops,
+                          expectedOutputValues);
+}
+
+void BatchToSpaceNDFp32BatchOneTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f };
+    std::vector<float> expectedOutputValues { 1.0f, 2.0f, 3.0f, 4.0f };
+
+    std::vector<unsigned int> blockShape({1, 1});
+    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
+
+    BatchSpaceTest<float>(tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          inputShape,
+                          expectedOutputShape,
+                          inputValues,
+                          blockShape,
+                          crops,
+                          expectedOutputValues);
+}
+
+void BatchToSpaceNDUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 4, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<uint8_t> inputValues { 1, 2, 3, 4, 5, 6, 7 };
+    std::vector<uint8_t> expectedOutputValues { 1, 2, 3, 4, 5, 6, 7 };
+
+    std::vector<unsigned int> blockShape({2, 2});
+    std::vector<std::pair<unsigned int, unsigned int>> crops = {{0, 0}, {0, 0}};
+
+    BatchSpaceTest<uint8_t>(tflite::BuiltinOperator_BATCH_TO_SPACE_ND,
+                            ::tflite::TensorType_UINT8,
+                            backends,
+                            inputShape,
+                            expectedOutputShape,
+                            inputValues,
+                            blockShape,
+                            crops,
+                            expectedOutputValues);
+}
+
+// SpaceToBatchND Operator
+void SpaceToBatchNDFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 4, 1, 1, 1 };
+
+    std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f };
+    std::vector<float> expectedOutputValues { 1.0f, 2.0f, 3.0f, 4.0f };
+
+    std::vector<unsigned int> blockShape({2, 2});
+    std::vector<std::pair<unsigned int, unsigned int>> padding = {{0, 0}, {0, 0}};
+
+    BatchSpaceTest<float>(tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          inputShape,
+                          expectedOutputShape,
+                          inputValues,
+                          blockShape,
+                          padding,
+                          expectedOutputValues);
+}
+
+void SpaceToBatchNDFp32PaddingTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 2, 2, 4, 1 };
+    std::vector<int32_t> expectedOutputShape { 8, 1, 3, 1 };
+
+    std::vector<float> inputValues { 1.0f,  2.0f,  3.0f,  4.0f,
+                                     5.0f,  6.0f,  7.0f,  8.0f,
+                                     9.0f,  10.0f, 11.0f, 12.0f,
+                                     13.0f, 14.0f, 15.0f, 16.0f };
+
+    std::vector<float> expectedOutputValues { 0.0f, 1.0f, 3.0f,  0.0f, 9.0f, 11.0f,
+                                              0.0f, 2.0f, 4.0f,  0.0f, 10.0f, 12.0f,
+                                              0.0f, 5.0f, 7.0f,  0.0f, 13.0f, 15.0f,
+                                              0.0f, 6.0f, 8.0f,  0.0f, 14.0f, 16.0f };
+
+    std::vector<unsigned int> blockShape({2, 2});
+    std::vector<std::pair<unsigned int, unsigned int>> padding = {{0, 0}, {2, 0}};
+
+    BatchSpaceTest<float>(tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          inputShape,
+                          expectedOutputShape,
+                          inputValues,
+                          blockShape,
+                          padding,
+                          expectedOutputValues);
+}
+
+void SpaceToBatchNDUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 1, 2, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 4, 1, 1, 3 };
+
+    std::vector<uint8_t> inputValues { 1, 2, 3, 4, 5, 6, 7 };
+    std::vector<uint8_t> expectedOutputValues { 1, 2, 3, 4, 5, 6, 7 };
+
+    std::vector<unsigned int> blockShape({2, 2});
+    std::vector<std::pair<unsigned int, unsigned int>> padding = {{0, 0}, {0, 0}};
+
+    BatchSpaceTest<uint8_t>(tflite::BuiltinOperator_SPACE_TO_BATCH_ND,
+                            ::tflite::TensorType_UINT8,
+                            backends,
+                            inputShape,
+                            expectedOutputShape,
+                            inputValues,
+                            blockShape,
+                            padding,
+                            expectedOutputValues);
+}
+
+// BatchToSpaceND Tests
+TEST_SUITE("BatchToSpaceND_CpuAccTests")
+{
+
+TEST_CASE ("BatchToSpaceND_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    BatchToSpaceNDFp32Test(backends);
+}
+
+TEST_CASE ("BatchToSpaceND_Fp32_BatchOne_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    BatchToSpaceNDFp32BatchOneTest(backends);
+}
+
+TEST_CASE ("BatchToSpaceND_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    BatchToSpaceNDUint8Test(backends);
+}
+
+}
+
+TEST_SUITE("BatchToSpaceND_GpuAccTests")
+{
+
+TEST_CASE ("BatchToSpaceND_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    BatchToSpaceNDFp32Test(backends);
+}
+
+TEST_CASE ("BatchToSpaceND_Fp32_BatchOne_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    BatchToSpaceNDFp32BatchOneTest(backends);
+}
+
+TEST_CASE ("BatchToSpaceND_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    BatchToSpaceNDUint8Test(backends);
+}
+
+}
+
+TEST_SUITE("BatchToSpaceND_CpuRefTests")
+{
+
+TEST_CASE ("BatchToSpaceND_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    BatchToSpaceNDFp32Test(backends);
+}
+
+TEST_CASE ("BatchToSpaceND_Fp32_BatchOne_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    BatchToSpaceNDFp32BatchOneTest(backends);
+}
+
+TEST_CASE ("BatchToSpaceND_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    BatchToSpaceNDUint8Test(backends);
+}
+
+}
+
+// SpaceToBatchND Tests
+TEST_SUITE("SpaceToBatchND_CpuAccTests")
+{
+
+TEST_CASE ("SpaceToBatchND_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    SpaceToBatchNDFp32Test(backends);
+}
+
+TEST_CASE ("SpaceToBatchND_Fp32_Padding_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    SpaceToBatchNDFp32PaddingTest(backends);
+}
+
+TEST_CASE ("SpaceToBatchND_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    SpaceToBatchNDUint8Test(backends);
+}
+
+}
+
+TEST_SUITE("SpaceToBatchND_GpuAccTests")
+{
+
+TEST_CASE ("SpaceToBatchND_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    SpaceToBatchNDFp32Test(backends);
+}
+
+TEST_CASE ("SpaceToBatchND_Fp32_Padding_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    SpaceToBatchNDFp32PaddingTest(backends);
+}
+
+TEST_CASE ("SpaceToBatchND_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    SpaceToBatchNDUint8Test(backends);
+}
+
+}
+
+TEST_SUITE("SpaceToBatchND_CpuRefTests")
+{
+
+TEST_CASE ("SpaceToBatchND_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    SpaceToBatchNDFp32Test(backends);
+}
+
+TEST_CASE ("SpaceToBatchND_Fp32_Padding_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    SpaceToBatchNDFp32PaddingTest(backends);
+}
+
+TEST_CASE ("SpaceToBatchND_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    SpaceToBatchNDUint8Test(backends);
+}
+
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/BatchSpaceTestHelper.hpp b/delegate/test/BatchSpaceTestHelper.hpp
new file mode 100644
index 0000000..597139d
--- /dev/null
+++ b/delegate/test/BatchSpaceTestHelper.hpp
@@ -0,0 +1,218 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateBatchSpaceTfLiteModel(tflite::BuiltinOperator batchSpaceOperatorCode,
+                                              tflite::TensorType tensorType,
+                                              std::vector<int32_t>& inputTensorShape,
+                                              std::vector <int32_t>& outputTensorShape,
+                                              std::vector<unsigned int>& blockData,
+                                              std::vector<std::pair<unsigned int, unsigned int>>& cropsPadData,
+                                              float quantScale = 1.0f,
+                                              int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder);
+    buffers[1] = CreateBuffer(flatBufferBuilder);
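+    // The block and crops/padding constants are serialised as raw bytes; this assumes a 32-bit unsigned int,
+    // so each (unsigned, unsigned) pair occupies one int64-sized element.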
+    buffers[2] = CreateBuffer(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(blockData.data()),
+                                                                  sizeof(int32_t) * blockData.size()));
+    buffers[3] = CreateBuffer(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cropsPadData.data()),
+                                                                  sizeof(int64_t) * cropsPadData.size()));
+    buffers[4] = CreateBuffer(flatBufferBuilder);
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::string cropsOrPadding =
+            batchSpaceOperatorCode == tflite::BuiltinOperator_BATCH_TO_SPACE_ND ? "crops" : "padding";
+
+    std::vector<int32_t> blockShape { 2 };
+    std::vector<int32_t> cropsOrPaddingShape { 2, 2 };
+
+    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(blockShape.data(),
+                                                                      blockShape.size()),
+                              ::tflite::TensorType_INT32,
+                              2,
+                              flatBufferBuilder.CreateString("block"),
+                              quantizationParameters);
+
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(cropsOrPaddingShape.data(),
+                                                                      cropsOrPaddingShape.size()),
+                              ::tflite::TensorType_INT32,
+                              3,
+                              flatBufferBuilder.CreateString(cropsOrPadding),
+                              quantizationParameters);
+
+    // Create output tensor
+    tensors[3] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              4,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // Create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
+    flatbuffers::Offset<void> operatorBuiltinOptions = 0;
+    switch (batchSpaceOperatorCode)
+    {
+        case tflite::BuiltinOperator_BATCH_TO_SPACE_ND:
+        {
+            operatorBuiltinOptionsType = tflite::BuiltinOptions_BatchToSpaceNDOptions;
+            operatorBuiltinOptions = CreateBatchToSpaceNDOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case tflite::BuiltinOperator_SPACE_TO_BATCH_ND:
+        {
+            operatorBuiltinOptionsType = tflite::BuiltinOptions_SpaceToBatchNDOptions;
+            operatorBuiltinOptions = CreateSpaceToBatchNDOptions(flatBufferBuilder).Union();
+            break;
+        }
+        default:
+            break;
+    }
+
+    const std::vector<int> operatorInputs{ {0, 1, 2} };
+    const std::vector<int> operatorOutputs{ 3 };
+    flatbuffers::Offset <Operator> batchSpaceOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{ {0, 1, 2} };
+    const std::vector<int> subgraphOutputs{ 3 };
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&batchSpaceOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: BatchSpace Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, batchSpaceOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void BatchSpaceTest(tflite::BuiltinOperator batchSpaceOperatorCode,
+                    tflite::TensorType tensorType,
+                    std::vector<armnn::BackendId>& backends,
+                    std::vector<int32_t>& inputShape,
+                    std::vector<int32_t>& expectedOutputShape,
+                    std::vector<T>& inputValues,
+                    std::vector<unsigned int>& blockShapeValues,
+                    std::vector<std::pair<unsigned int, unsigned int>>& cropsPaddingValues,
+                    std::vector<T>& expectedOutputValues,
+                    float quantScale = 1.0f,
+                    int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateBatchSpaceTfLiteModel(batchSpaceOperatorCode,
+                                                                tensorType,
+                                                                inputShape,
+                                                                expectedOutputShape,
+                                                                blockShapeValues,
+                                                                cropsPaddingValues,
+                                                                quantScale,
+                                                                quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                        armnnDelegateInterpreter,
+                                        expectedOutputShape,
+                                        expectedOutputValues);
+
+    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/CastTest.cpp b/delegate/test/CastTest.cpp
new file mode 100644
index 0000000..d3c3e29
--- /dev/null
+++ b/delegate/test/CastTest.cpp
@@ -0,0 +1,95 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "CastTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void CastUint8ToFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  {1, 3, 2, 3};
+
+    std::vector<uint8_t> inputValues { 1, 3, 1, 3, 1, 3, 1, 3, 1,
+                                       3, 1, 3, 1, 2, 1, 3, 1, 3 };
+
+    std::vector<float> expectedOutputValues { 1.0f, 3.0f, 1.0f, 3.0f, 1.0f, 3.0f, 1.0f, 3.0f, 1.0f,
+                                              3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
+
+    CastTest<uint8_t, float>(::tflite::TensorType_UINT8,
+                             ::tflite::TensorType_FLOAT32,
+                             backends,
+                             inputShape,
+                             inputValues,
+                             expectedOutputValues);
+}
+
+void CastInt32ToFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  {1, 3, 2, 3};
+
+    std::vector<int32_t> inputValues { -1, -3, -1, -3, -1, -3, -1, -3, 1,
+                                       3, 1, 3, 1, 2, 1, 3, 1, 3 };
+
+    std::vector<float> expectedOutputValues { -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, -1.0f, -3.0f, 1.0f,
+                                              3.0f, 1.0f, 3.0f, 1.0f, 2.0f, 1.0f, 3.0f, 1.0f, 3.0f };
+
+    CastTest<int32_t, float>(::tflite::TensorType_INT32,
+                             ::tflite::TensorType_FLOAT32,
+                             backends,
+                             inputShape,
+                             inputValues,
+                             expectedOutputValues);
+}
+
+// CAST Test Suite
+TEST_SUITE("CAST_CpuRefTests")
+{
+
+TEST_CASE ("CAST_UINT8_TO_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    CastUint8ToFp32Test(backends);
+}
+
+TEST_CASE ("CAST_INT32_TO_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    CastInt32ToFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("CAST_CpuAccTests")
+{
+
+TEST_CASE ("CAST_INT32_TO_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    CastInt32ToFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("CAST_GpuAccTests")
+{
+
+TEST_CASE ("CAST_INT32_TO_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    CastInt32ToFp32Test(backends);
+}
+
+}
+// End of CAST Test Suite
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/CastTestHelper.hpp b/delegate/test/CastTestHelper.hpp
new file mode 100644
index 0000000..be1967c
--- /dev/null
+++ b/delegate/test/CastTestHelper.hpp
@@ -0,0 +1,159 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateCastTfLiteModel(tflite::TensorType inputTensorType,
+                                        tflite::TensorType outputTensorType,
+                                        const std::vector <int32_t>& tensorShape,
+                                        float quantScale = 1.0f,
+                                        int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({quantScale}),
+                                     flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              inputTensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              outputTensorType,
+                              2,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    const std::vector<int32_t> operatorInputs({0});
+    const std::vector<int32_t> operatorOutputs({1});
+
+    flatbuffers::Offset<Operator> castOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       BuiltinOptions_CastOptions,
+                       CreateCastOptions(flatBufferBuilder).Union());
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: CAST Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode =
+        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_CAST);
+
+    const std::vector<int32_t> subgraphInputs({0});
+    const std::vector<int32_t> subgraphOutputs({1});
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&castOperator, 1));
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template<typename T, typename K>
+void CastTest(tflite::TensorType inputTensorType,
+              tflite::TensorType outputTensorType,
+              std::vector<armnn::BackendId>& backends,
+              std::vector<int32_t>& shape,
+              std::vector<T>& inputValues,
+              std::vector<K>& expectedOutputValues,
+              float quantScale = 1.0f,
+              int quantOffset = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateCastTfLiteModel(inputTensorType,
+                                                          outputTensorType,
+                                                          shape,
+                                                          quantScale,
+                                                          quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegate;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegate) == kTfLiteOk);
+    CHECK(armnnDelegate != nullptr);
+    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteDelegate;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteDelegate) == kTfLiteOk);
+    CHECK(tfLiteDelegate != nullptr);
+    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify the armnnDelegate interpreter to use the ArmNN delegate
+    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<K>(tfLiteDelegate,
+                                        armnnDelegate,
+                                        shape,
+                                        expectedOutputValues,
+                                        0);
+
+    tfLiteDelegate.reset(nullptr);
+    armnnDelegate.reset(nullptr);
+}
+
+} // anonymous namespace
diff --git a/delegate/test/ComparisonTest.cpp b/delegate/test/ComparisonTest.cpp
new file mode 100644
index 0000000..b044c27
--- /dev/null
+++ b/delegate/test/ComparisonTest.cpp
@@ -0,0 +1,844 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ComparisonTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void EqualFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
+        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> input1Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 3.f, 3.f, 3.f, 3.f,
+        5.f, 5.f, 5.f, 5.f, 4.f, 4.f, 4.f, 4.f
+    };
+
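+    // Comparison operators produce boolean tensors; 1/0 literals are used for readability.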
+    std::vector<bool> expectedOutputValues =
+    {
+        1, 1, 1, 1, 0, 0, 0, 0,
+        0, 0, 0, 0, 1, 1, 1, 1
+    };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void EqualBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<float> input0Values
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+    std::vector<float> input1Values { 4.f, 5.f, 6.f };
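+    // input1 holds one value per channel and is broadcast across the 2x2 spatial positions of input0.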
+    // Set output data
+    std::vector<bool> expectedOutputValues
+    {
+        0, 0, 0, 1, 1, 1,
+        0, 0, 0, 0, 0, 0
+    };
+    ComparisonTest<float>(tflite::BuiltinOperator_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void EqualInt32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<int32_t> input0Values = { 1, 5, 6, 4 };
+
+    std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { 1, 0, 0, 1 };
+
+    ComparisonTest<int32_t>(tflite::BuiltinOperator_EQUAL,
+                            ::tflite::TensorType_INT32,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void NotEqualFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 5.f, 5.f, 5.f, 5.f,
+        3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> input1Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 3.f, 3.f, 3.f, 3.f,
+        5.f, 5.f, 5.f, 5.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<bool> expectedOutputValues =
+    {
+        0, 0, 0, 0, 1, 1, 1, 1,
+        1, 1, 1, 1, 0, 0, 0, 0
+    };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_NOT_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void NotEqualBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<float> input0Values
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+    std::vector<float> input1Values { 4.f, 5.f, 6.f };
+    // Set output data
+    std::vector<bool> expectedOutputValues
+    {
+        1, 1, 1, 0, 0, 0,
+        1, 1, 1, 1, 1, 1
+    };
+    ComparisonTest<float>(tflite::BuiltinOperator_NOT_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void NotEqualInt32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<int32_t> input0Values = { 1, 5, 6, 4 };
+
+    std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { 0, 1, 1, 0 };
+
+    ComparisonTest<int32_t>(tflite::BuiltinOperator_NOT_EQUAL,
+                            ::tflite::TensorType_INT32,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void GreaterFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> input0Values = { 1, 5, 6, 4 };
+
+    std::vector<float> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { 0, 1, 0, 0 };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_GREATER,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void GreaterBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<float> input0Values
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+    std::vector<float> input1Values { 4.f, 5.f, 6.f };
+
+    std::vector<bool> expectedOutputValues
+    {
+        0, 0, 0, 0, 0, 0,
+        1, 1, 1, 1, 1, 1
+    };
+    ComparisonTest<float>(tflite::BuiltinOperator_GREATER,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void GreaterInt32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<int32_t> input0Values = { 1, 5, 6, 4 };
+
+    std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { 0, 1, 0, 0 };
+
+    ComparisonTest<int32_t>(tflite::BuiltinOperator_GREATER,
+                            ::tflite::TensorType_INT32,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void GreaterEqualFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> input0Values = { 1.f, 5.f, 6.f, 4.f };
+
+    std::vector<float> input1Values = { 1.f, 3.f, 9.f, 4.f };
+
+    std::vector<bool> expectedOutputValues = { true, true, false, true };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_GREATER_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void GreaterEqualBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<float> input0Values
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+    std::vector<float> input1Values { 4.f, 5.f, 6.f };
+    // Set output data
+    std::vector<bool> expectedOutputValues
+    {
+        0, 0, 0, 1, 1, 1,
+        1, 1, 1, 1, 1, 1
+    };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_GREATER_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void GreaterEqualInt32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<int32_t> input0Values = { 1, 5, 6, 3 };
+
+    std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { 1, 1, 0, 0 };
+
+    ComparisonTest<int32_t>(tflite::BuiltinOperator_GREATER_EQUAL,
+                            ::tflite::TensorType_INT32,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void LessFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> input0Values = { 1.f, 5.f, 6.f, 4.f };
+
+    std::vector<float> input1Values = { 1.f, 3.f, 9.f, 4.f };
+
+    std::vector<bool> expectedOutputValues = { false, false, true, false };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_LESS,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void LessBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<float> input0Values
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+    std::vector<float> input1Values { 4.f, 5.f, 6.f };
+
+    std::vector<bool> expectedOutputValues
+    {
+        true, true, true, false, false, false,
+        false, false, false, false, false, false
+    };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_LESS,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void LessInt32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<int32_t> input0Values = { 1, 5, 6, 3 };
+
+    std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { false, false, true, true };
+
+    ComparisonTest<int32_t>(tflite::BuiltinOperator_LESS,
+                            ::tflite::TensorType_INT32,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void LessEqualFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> input0Values = { 1.f, 5.f, 6.f, 4.f };
+
+    std::vector<float> input1Values = { 1.f, 3.f, 9.f, 4.f };
+
+    std::vector<bool> expectedOutputValues = { true, false, true, true };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_LESS_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void LessEqualBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<float> input0Values
+    {
+        1.f, 2.f, 3.f,  4.f,  5.f,  6.f,
+        7.f, 8.f, 9.f, 10.f, 11.f, 12.f
+    };
+    std::vector<float> input1Values { 4.f, 5.f, 6.f };
+
+    std::vector<bool> expectedOutputValues
+    {
+        true, true, true, true, true, true,
+        false, false, false, false, false, false
+    };
+
+    ComparisonTest<float>(tflite::BuiltinOperator_LESS_EQUAL,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          input0Shape,
+                          input1Shape,
+                          expectedOutputShape,
+                          input0Values,
+                          input1Values,
+                          expectedOutputValues);
+}
+
+void LessEqualInt32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<int32_t> input0Values = { 1, 5, 6, 3 };
+
+    std::vector<int32_t> input1Values = { 1, 3, 9, 4 };
+
+    std::vector<bool> expectedOutputValues = { true, false, true, true };
+
+    ComparisonTest<int32_t>(tflite::BuiltinOperator_LESS_EQUAL,
+                            ::tflite::TensorType_INT32,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
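+// The comparison helper threads quantScale/quantOffset through to the generated
+// model, so a quantized variant needs only different type arguments. A minimal
+// sketch, kept as a comment so it does not change the test set (EqualUint8Test
+// is a hypothetical name, not part of this patch):
+//
+//     void EqualUint8Test(std::vector<armnn::BackendId>& backends)
+//     {
+//         std::vector<int32_t> shape { 1, 2, 2, 1 };
+//         std::vector<uint8_t> input0Values = { 1, 5, 6, 4 };
+//         std::vector<uint8_t> input1Values = { 1, 3, 9, 4 };
+//         std::vector<bool> expectedOutputValues = { 1, 0, 0, 1 };
+//
+//         ComparisonTest<uint8_t>(tflite::BuiltinOperator_EQUAL,
+//                                 ::tflite::TensorType_UINT8,
+//                                 backends,
+//                                 shape, shape, shape,
+//                                 input0Values, input1Values, expectedOutputValues,
+//                                 1.0f, 0); // quantScale, quantOffset
+//     }
+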
+TEST_SUITE("Comparison_CpuRefTests")
+{
+
+TEST_CASE ("EQUAL_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    EqualFP32Test(backends);
+}
+
+TEST_CASE ("EQUAL_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    EqualBroadcastTest(backends);
+}
+
+TEST_CASE ("EQUAL_INT32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    EqualInt32Test(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    NotEqualFP32Test(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    NotEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_INT32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    NotEqualInt32Test(backends);
+}
+
+TEST_CASE ("GREATER_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    GreaterFP32Test(backends);
+}
+
+TEST_CASE ("GREATER_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    GreaterBroadcastTest(backends);
+}
+
+TEST_CASE ("GREATER_INT32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    GreaterInt32Test(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    GreaterEqualFP32Test(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    GreaterEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_INT32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    GreaterEqualInt32Test(backends);
+}
+
+TEST_CASE ("LESS_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    LessFP32Test(backends);
+}
+
+TEST_CASE ("LESS_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    LessBroadcastTest(backends);
+}
+
+TEST_CASE ("LESS_INT32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    LessInt32Test(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    LessEqualFP32Test(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    LessEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_INT32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    LessEqualInt32Test(backends);
+}
+} // End TEST_SUITE("Comparison_CpuRefTests")
+
+
+TEST_SUITE("Comparison_GpuAccTests")
+{
+
+TEST_CASE ("EQUAL_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    EqualFP32Test(backends);
+}
+
+TEST_CASE ("EQUAL_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    EqualBroadcastTest(backends);
+}
+
+TEST_CASE ("EQUAL_INT32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    EqualInt32Test(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    NotEqualFP32Test(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    NotEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_INT32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    NotEqualInt32Test(backends);
+}
+
+TEST_CASE ("GREATER_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterFP32Test(backends);
+}
+
+TEST_CASE ("GREATER_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterBroadcastTest(backends);
+}
+
+TEST_CASE ("GREATER_INT32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+    GreaterInt32Test(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    GreaterEqualFP32Test(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    GreaterEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_INT32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    GreaterEqualInt32Test(backends);
+}
+
+TEST_CASE ("LESS_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    LessFP32Test(backends);
+}
+
+TEST_CASE ("LESS_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    LessBroadcastTest(backends);
+}
+
+TEST_CASE ("LESS_INT32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    LessInt32Test(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    LessEqualFP32Test(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    LessEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_INT32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    LessEqualInt32Test(backends);
+}
+
+} // End TEST_SUITE("Comparison_GpuAccTests")
+
+
+TEST_SUITE("Comparison_CpuAccTests")
+{
+
+TEST_CASE ("EQUAL_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    EqualFP32Test(backends);
+}
+
+TEST_CASE ("EQUAL_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    EqualBroadcastTest(backends);
+}
+
+TEST_CASE ("EQUAL_INT32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    EqualInt32Test(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    NotEqualFP32Test(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    NotEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("NOT_EQUAL_INT32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    NotEqualInt32Test(backends);
+}
+
+TEST_CASE ("GREATER_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    GreaterFP32Test(backends);
+}
+
+TEST_CASE ("GREATER_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    GreaterBroadcastTest(backends);
+}
+
+TEST_CASE ("GREATER_INT32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    GreaterInt32Test(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    GreaterEqualFP32Test(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    GreaterEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("GREATER_EQUAL_INT32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    GreaterEqualInt32Test(backends);
+}
+
+TEST_CASE ("LESS_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    LessFP32Test(backends);
+}
+
+TEST_CASE ("LESS_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    LessBroadcastTest(backends);
+}
+
+TEST_CASE ("LESS_INT32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    LessInt32Test(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    LessEqualFP32Test(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    LessEqualBroadcastTest(backends);
+}
+
+TEST_CASE ("LESS_EQUAL_INT32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    LessEqualInt32Test(backends);
+}
+
+} // End TEST_SUITE("Comparison_CpuAccTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ComparisonTestHelper.hpp b/delegate/test/ComparisonTestHelper.hpp
new file mode 100644
index 0000000..ef9f87a
--- /dev/null
+++ b/delegate/test/ComparisonTestHelper.hpp
@@ -0,0 +1,238 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateComparisonTfLiteModel(tflite::BuiltinOperator comparisonOperatorCode,
+                                              tflite::TensorType tensorType,
+                                              const std::vector <int32_t>& input0TensorShape,
+                                              const std::vector <int32_t>& input1TensorShape,
+                                              const std::vector <int32_t>& outputTensorShape,
+                                              float quantScale = 1.0f,
+                                              int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
+                                                                      input0TensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input_0"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
+                                                                      input1TensorShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("input_1"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              ::tflite::TensorType_BOOL,
+                              3);
+
+    // Create operator. Default to EqualOptions; the switch below selects the
+    // options table that matches the requested comparison operator.
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_EqualOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateEqualOptions(flatBufferBuilder).Union();
+    switch (comparisonOperatorCode)
+    {
+        case BuiltinOperator_EQUAL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_EqualOptions;
+            operatorBuiltinOptions = CreateEqualOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_NOT_EQUAL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_NotEqualOptions;
+            operatorBuiltinOptions = CreateNotEqualOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_GREATER:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_GreaterOptions;
+            operatorBuiltinOptions = CreateGreaterOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_GREATER_EQUAL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_GreaterEqualOptions;
+            operatorBuiltinOptions = CreateGreaterEqualOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_LESS:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_LessOptions;
+            operatorBuiltinOptions = CreateLessOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_LESS_EQUAL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_LessEqualOptions;
+            operatorBuiltinOptions = CreateLessEqualOptions(flatBufferBuilder).Union();
+            break;
+        }
+        default:
+            break;
+    }
+    const std::vector<int32_t> operatorInputs{0, 1};
+    const std::vector<int32_t> operatorOutputs{2};
+    flatbuffers::Offset <Operator> comparisonOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{0, 1};
+    const std::vector<int> subgraphOutputs{2};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&comparisonOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Comparison Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, comparisonOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
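+// If a generated model ever fails to parse, the buffer can be sanity-checked with
+// the flatbuffers-generated verifier before handing it to the interpreter. An
+// optional check the tests do not rely on:
+//
+//     flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t*>(modelBuffer.data()),
+//                                    modelBuffer.size());
+//     CHECK(tflite::VerifyModelBuffer(verifier));
+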
+template <typename T>
+void ComparisonTest(tflite::BuiltinOperator comparisonOperatorCode,
+                    tflite::TensorType tensorType,
+                    std::vector<armnn::BackendId>& backends,
+                    std::vector<int32_t>& input0Shape,
+                    std::vector<int32_t>& input1Shape,
+                    std::vector<int32_t>& outputShape,
+                    std::vector<T>& input0Values,
+                    std::vector<T>& input1Values,
+                    std::vector<bool>& expectedOutputValues,
+                    float quantScale = 1.0f,
+                    int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateComparisonTfLiteModel(comparisonOperatorCode,
+                                                                tensorType,
+                                                                input0Shape,
+                                                                input1Shape,
+                                                                outputShape,
+                                                                quantScale,
+                                                                quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInput0Id = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInput0Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput0Id);
+    for (unsigned int i = 0; i < input0Values.size(); ++i)
+    {
+        tfLiteDelegateInput0Data[i] = input0Values[i];
+    }
+
+    auto tfLiteDelegateInput1Id = tfLiteInterpreter->inputs()[1];
+    auto tfLiteDelegateInput1Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput1Id);
+    for (unsigned int i = 0; i < input1Values.size(); ++i)
+    {
+        tfLiteDelegateInput1Data[i] = input1Values[i];
+    }
+
+    auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput0Id);
+    for (unsigned int i = 0; i < input0Values.size(); ++i)
+    {
+        armnnDelegateInput0Data[i] = input0Values[i];
+    }
+
+    auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
+    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput1Id);
+    for (unsigned int i = 0; i < input1Values.size(); ++i)
+    {
+        armnnDelegateInput1Data[i] = input1Values[i];
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<bool>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<bool>(armnnDelegateOutputId);
+
+    armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size());
+    armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ControlTest.cpp b/delegate/test/ControlTest.cpp
new file mode 100644
index 0000000..ec7ff88
--- /dev/null
+++ b/delegate/test/ControlTest.cpp
@@ -0,0 +1,420 @@
+//
+// Copyright © 2020,2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ControlTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+// CONCATENATION Operator
+void ConcatUint8TwoInputsTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 4, 2 };
+
+    // Set input and output data
+    std::vector<std::vector<uint8_t>> inputValues;
+    std::vector<uint8_t> inputValue1 { 0, 1, 2, 3 }; // Lower bounds
+    std::vector<uint8_t> inputValue2 { 252, 253, 254, 255 }; // Upper bounds
+    inputValues.push_back(inputValue1);
+    inputValues.push_back(inputValue2);
+
+    std::vector<uint8_t> expectedOutputValues { 0, 1, 2, 3, 252, 253, 254, 255 };
+
+    ConcatenationTest<uint8_t>(tflite::BuiltinOperator_CONCATENATION,
+                               ::tflite::TensorType_UINT8,
+                               backends,
+                               inputShape,
+                               expectedOutputShape,
+                               inputValues,
+                               expectedOutputValues);
+}
+
+void ConcatInt16TwoInputsTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 4, 2 };
+
+    std::vector<std::vector<int16_t>> inputValues;
+    std::vector<int16_t> inputValue1 { -32768, -16384, -1, 0 };
+    std::vector<int16_t> inputValue2 { 1, 2, 16384, 32767 };
+    inputValues.push_back(inputValue1);
+    inputValues.push_back(inputValue2);
+
+    std::vector<int16_t> expectedOutputValues { -32768, -16384, -1, 0, 1, 2, 16384, 32767};
+
+    ConcatenationTest<int16_t>(tflite::BuiltinOperator_CONCATENATION,
+                               ::tflite::TensorType_INT16,
+                               backends,
+                               inputShape,
+                               expectedOutputShape,
+                               inputValues,
+                               expectedOutputValues);
+}
+
+void ConcatFloat32TwoInputsTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 4, 2 };
+
+    std::vector<std::vector<float>> inputValues;
+    std::vector<float> inputValue1 { -127.f, -126.f, -1.f, 0.f };
+    std::vector<float> inputValue2 { 1.f, 2.f, 126.f, 127.f };
+    inputValues.push_back(inputValue1);
+    inputValues.push_back(inputValue2);
+
+    std::vector<float> expectedOutputValues { -127.f, -126.f, -1.f, 0.f, 1.f, 2.f, 126.f, 127.f };
+
+    ConcatenationTest<float>(tflite::BuiltinOperator_CONCATENATION,
+                             ::tflite::TensorType_FLOAT32,
+                             backends,
+                             inputShape,
+                             expectedOutputShape,
+                             inputValues,
+                             expectedOutputValues);
+}
+
+void ConcatThreeInputsTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 6, 2 };
+
+    std::vector<std::vector<uint8_t>> inputValues;
+    std::vector<uint8_t> inputValue1 { 0, 1, 2, 3 };
+    std::vector<uint8_t> inputValue2 { 125, 126, 127, 128 };
+    std::vector<uint8_t> inputValue3 { 252, 253, 254, 255 };
+    inputValues.push_back(inputValue1);
+    inputValues.push_back(inputValue2);
+    inputValues.push_back(inputValue3);
+
+    std::vector<uint8_t> expectedOutputValues { 0, 1, 2, 3, 125, 126, 127, 128, 252, 253, 254, 255 };
+
+    ConcatenationTest<uint8_t>(tflite::BuiltinOperator_CONCATENATION,
+                               ::tflite::TensorType_UINT8,
+                               backends,
+                               inputShape,
+                               expectedOutputShape,
+                               inputValues,
+                               expectedOutputValues);
+}
+
+void ConcatAxisTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 1, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 4 };
+
+    std::vector<std::vector<uint8_t>> inputValues;
+    std::vector<uint8_t> inputValue1 { 0, 1, 2, 3 };
+    std::vector<uint8_t> inputValue3 { 252, 253, 254, 255 };
+    inputValues.push_back(inputValue1);
+    inputValues.push_back(inputValue3);
+
+    std::vector<uint8_t> expectedOutputValues { 0, 1, 252, 253, 2, 3, 254, 255 };
+
+    ConcatenationTest<uint8_t>(tflite::BuiltinOperator_CONCATENATION,
+                               ::tflite::TensorType_UINT8,
+                               backends,
+                               inputShape,
+                               expectedOutputShape,
+                               inputValues,
+                               expectedOutputValues,
+                               2);
+}
+
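+// Worked layout for ConcatAxisTest above: each { 1, 2, 2 } input holds two rows of
+// two values, and axis 2 appends along the innermost dimension, so rows are joined
+// side by side rather than stacked:
+//
+//     inputValue1 row 0: { 0, 1 }      inputValue3 row 0: { 252, 253 }
+//     inputValue1 row 1: { 2, 3 }      inputValue3 row 1: { 254, 255 }
+//
+//     output row 0: { 0, 1, 252, 253 }
+//     output row 1: { 2, 3, 254, 255 }
+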
+// MEAN Operator
+void MeanUint8KeepDimsTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 3 };
+    std::vector<int32_t> input1Shape { 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 1 };
+
+    std::vector<uint8_t> input0Values { 5, 10, 15 }; // Inputs
+    std::vector<int32_t> input1Values { 1 }; // Axis
+
+    std::vector<uint8_t> expectedOutputValues { 10 };
+
+    MeanTest<uint8_t>(tflite::BuiltinOperator_MEAN,
+                      ::tflite::TensorType_UINT8,
+                      backends,
+                      input0Shape,
+                      input1Shape,
+                      expectedOutputShape,
+                      input0Values,
+                      input1Values,
+                      expectedOutputValues,
+                      true);
+}
+
+void MeanUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1 };
+    std::vector<int32_t> expectedOutputShape { 2, 2 };
+
+    std::vector<uint8_t> input0Values { 5, 10, 15, 20 }; // Inputs
+    std::vector<int32_t> input1Values { 0 }; // Axis
+
+    std::vector<uint8_t> expectedOutputValues { 5, 10, 15, 20 };
+
+    MeanTest<uint8_t>(tflite::BuiltinOperator_MEAN,
+                      ::tflite::TensorType_UINT8,
+                      backends,
+                      input0Shape,
+                      input1Shape,
+                      expectedOutputShape,
+                      input0Values,
+                      input1Values,
+                      expectedOutputValues,
+                      false);
+}
+
+void MeanFp32KeepDimsTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 2 };
+
+    std::vector<float>   input0Values { 1.0f, 1.5f, 2.0f, 2.5f }; // Inputs
+    std::vector<int32_t> input1Values { 1 }; // Axis
+
+    std::vector<float>   expectedOutputValues { 1.5f, 2.0f };
+
+    MeanTest<float>(tflite::BuiltinOperator_MEAN,
+                    ::tflite::TensorType_FLOAT32,
+                    backends,
+                    input0Shape,
+                    input1Shape,
+                    expectedOutputShape,
+                    input0Values,
+                    input1Values,
+                    expectedOutputValues,
+                    true);
+}
+
+void MeanFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 1 };
+
+    std::vector<float>   input0Values { 1.0f, 1.5f, 2.0f, 2.5f }; // Inputs
+    std::vector<int32_t> input1Values { 2 }; // Axis
+
+    std::vector<float>   expectedOutputValues { 1.25f, 2.25f };
+
+    MeanTest<float>(tflite::BuiltinOperator_MEAN,
+                    ::tflite::TensorType_FLOAT32,
+                    backends,
+                    input0Shape,
+                    input1Shape,
+                    expectedOutputShape,
+                    input0Values,
+                    input1Values,
+                    expectedOutputValues,
+                    false);
+}
+
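+// Worked arithmetic for MeanFp32Test above: with input shape { 1, 2, 2, 1 } and
+// axis 2, each pair along dimension 2 is averaged:
+//
+//     (1.0 + 1.5) / 2 = 1.25
+//     (2.0 + 2.5) / 2 = 2.25
+//
+// With keepDims == false the reduced axis is dropped, giving shape { 1, 2, 1 }.
+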
+// CONCATENATION Tests.
+TEST_SUITE("Concatenation_CpuAccTests")
+{
+
+TEST_CASE ("Concatenation_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    ConcatUint8TwoInputsTest(backends);
+}
+
+TEST_CASE ("Concatenation_Int16_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    ConcatInt16TwoInputsTest(backends);
+}
+
+TEST_CASE ("Concatenation_Float32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    ConcatFloat32TwoInputsTest(backends);
+}
+
+TEST_CASE ("Concatenation_Three_Inputs_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    ConcatThreeInputsTest(backends);
+}
+
+TEST_CASE ("Concatenation_Axis_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    ConcatAxisTest(backends);
+}
+
+} // End TEST_SUITE("Concatenation_CpuAccTests")
+
+TEST_SUITE("Concatenation_GpuAccTests")
+{
+
+TEST_CASE ("Concatenation_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    ConcatUint8TwoInputsTest(backends);
+}
+
+TEST_CASE ("Concatenation_Int16_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    ConcatInt16TwoInputsTest(backends);
+}
+
+TEST_CASE ("Concatenation_Float32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    ConcatFloat32TwoInputsTest(backends);
+}
+
+TEST_CASE ("Concatenation_Three_Inputs_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    ConcatThreeInputsTest(backends);
+}
+
+TEST_CASE ("Concatenation_Axis_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    ConcatAxisTest(backends);
+}
+
+} // End TEST_SUITE("Concatenation_GpuAccTests")
+
+TEST_SUITE("Concatenation_CpuRefTests")
+{
+
+TEST_CASE ("Concatenation_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    ConcatUint8TwoInputsTest(backends);
+}
+
+TEST_CASE ("Concatenation_Int16_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    ConcatInt16TwoInputsTest(backends);
+}
+
+TEST_CASE ("Concatenation_Float32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    ConcatFloat32TwoInputsTest(backends);
+}
+
+TEST_CASE ("Concatenation_Three_Inputs_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    ConcatThreeInputsTest(backends);
+}
+
+TEST_CASE ("Concatenation_Axis_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    ConcatAxisTest(backends);
+}
+
+} // End TEST_SUITE("Concatenation_CpuRefTests")
+
+// MEAN Tests
+TEST_SUITE("Mean_CpuAccTests")
+{
+
+TEST_CASE ("Mean_Uint8_KeepDims_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    MeanUint8KeepDimsTest(backends);
+}
+
+TEST_CASE ("Mean_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    MeanUint8Test(backends);
+}
+
+TEST_CASE ("Mean_Fp32_KeepDims_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    MeanFp32KeepDimsTest(backends);
+}
+
+TEST_CASE ("Mean_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    MeanFp32Test(backends);
+}
+
+} // End TEST_SUITE("Mean_CpuAccTests")
+
+TEST_SUITE("Mean_GpuAccTests")
+{
+
+TEST_CASE ("Mean_Uint8_KeepDims_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    MeanUint8KeepDimsTest(backends);
+}
+
+TEST_CASE ("Mean_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    MeanUint8Test(backends);
+}
+
+TEST_CASE ("Mean_Fp32_KeepDims_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    MeanFp32KeepDimsTest(backends);
+}
+
+TEST_CASE ("Mean_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    MeanFp32Test(backends);
+}
+
+} // End TEST_SUITE("Mean_GpuAccTests")
+
+TEST_SUITE("Mean_CpuRefTests")
+{
+
+TEST_CASE ("Mean_Uint8_KeepDims_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    MeanUint8KeepDimsTest(backends);
+}
+
+TEST_CASE ("Mean_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    MeanUint8Test(backends);
+}
+
+TEST_CASE ("Mean_Fp32_KeepDims_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    MeanFp32KeepDimsTest(backends);
+}
+
+TEST_CASE ("Mean_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    MeanFp32Test(backends);
+}
+
+} // End TEST_SUITE("Mean_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ControlTestHelper.hpp b/delegate/test/ControlTestHelper.hpp
new file mode 100644
index 0000000..f68cc07
--- /dev/null
+++ b/delegate/test/ControlTestHelper.hpp
@@ -0,0 +1,346 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+#include <string>
+
+namespace
+{
+
+std::vector<char> CreateConcatTfLiteModel(tflite::BuiltinOperator controlOperatorCode,
+                                          tflite::TensorType tensorType,
+                                          std::vector<int32_t>& inputTensorShape,
+                                          const std::vector <int32_t>& outputTensorShape,
+                                          const int32_t inputTensorNum,
+                                          int32_t axis = 0,
+                                          float quantScale = 1.0f,
+                                          int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::vector<int32_t> operatorInputs{};
+    const std::vector<int32_t> operatorOutputs{inputTensorNum};
+    std::vector<int> subgraphInputs{};
+    const std::vector<int> subgraphOutputs{inputTensorNum};
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors(inputTensorNum + 1);
+    for (int i = 0; i < inputTensorNum; ++i)
+    {
+        tensors[i] = CreateTensor(flatBufferBuilder,
+                                  flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                          inputTensorShape.size()),
+                                  tensorType,
+                                  1,
+                                  flatBufferBuilder.CreateString("input" + std::to_string(i)),
+                                  quantizationParameters);
+
+        // Register this tensor index as an operator and subgraph input.
+        operatorInputs.push_back(i);
+        subgraphInputs.push_back(i);
+    }
+
+    // Create output tensor
+    tensors[inputTensorNum] = CreateTensor(flatBufferBuilder,
+                                           flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                                   outputTensorShape.size()),
+                                           tensorType,
+                                           2,
+                                           flatBufferBuilder.CreateString("output"),
+                                           quantizationParameters);
+
+    // create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_ConcatenationOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateConcatenationOptions(flatBufferBuilder, axis).Union();
+
+    flatbuffers::Offset <Operator> controlOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&controlOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: Concatenation Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, controlOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
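+// Index bookkeeping in CreateConcatTfLiteModel, worked through for the three-input
+// case exercised by ConcatThreeInputsTest: tensors 0..2 are the inputs and tensor 3
+// is the output, so operatorInputs/subgraphInputs become { 0, 1, 2 } and the single
+// output index is { 3 }, i.e. inputTensorNum.
+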
+std::vector<char> CreateMeanTfLiteModel(tflite::BuiltinOperator controlOperatorCode,
+                                        tflite::TensorType tensorType,
+                                        std::vector<int32_t>& input0TensorShape,
+                                        std::vector<int32_t>& input1TensorShape,
+                                        const std::vector <int32_t>& outputTensorShape,
+                                        std::vector<int32_t>& axisData,
+                                        const bool keepDims,
+                                        float quantScale = 1.0f,
+                                        int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder);
+    buffers[1] = CreateBuffer(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
+                                                             sizeof(int32_t) * axisData.size()));
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
+                                                                      input0TensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
+                                                                      input1TensorShape.size()),
+                              ::tflite::TensorType_INT32,
+                              1,
+                              flatBufferBuilder.CreateString("axis"),
+                              quantizationParameters);
+
+    // Create output tensor
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // create operator. Mean uses ReducerOptions.
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_ReducerOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateReducerOptions(flatBufferBuilder, keepDims).Union();
+
+    const std::vector<int> operatorInputs{ 0, 1 };
+    const std::vector<int> operatorOutputs{ 2 };
+    flatbuffers::Offset <Operator> controlOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{ 0, 1 };
+    const std::vector<int> subgraphOutputs{ 2 };
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&controlOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: Mean Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, controlOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
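+// Unlike the inputs to the comparison and concat helpers, the axis tensor above is
+// a constant: buffer 1 carries axisData as raw bytes, so the axis is baked into the
+// flatbuffer rather than fed in at runtime. Sketch of that layout for axis { 1 }
+// (illustrative; the reinterpret_cast copy preserves the host's byte order):
+//
+//     std::vector<int32_t> axisData { 1 };
+//     const auto* bytes = reinterpret_cast<const uint8_t*>(axisData.data());
+//     // bytes[0..3] hold 0x01 0x00 0x00 0x00 on a little-endian host
+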
+template <typename T>
+void ConcatenationTest(tflite::BuiltinOperator controlOperatorCode,
+                       tflite::TensorType tensorType,
+                       std::vector<armnn::BackendId>& backends,
+                       std::vector<int32_t>& inputShapes,
+                       std::vector<int32_t>& expectedOutputShape,
+                       std::vector<std::vector<T>>& inputValues,
+                       std::vector<T>& expectedOutputValues,
+                       int32_t axis = 0,
+                       float quantScale = 1.0f,
+                       int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateConcatTfLiteModel(controlOperatorCode,
+                                                            tensorType,
+                                                            inputShapes,
+                                                            expectedOutputShape,
+                                                            inputValues.size(),
+                                                            axis,
+                                                            quantScale,
+                                                            quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data for all input tensors.
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        // Get single input tensor and assign to interpreters.
+        auto inputTensorValues = inputValues[i];
+        armnnDelegate::FillInput<T>(tfLiteInterpreter, i, inputTensorValues);
+        armnnDelegate::FillInput<T>(armnnDelegateInterpreter, i, inputTensorValues);
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                        armnnDelegateInterpreter,
+                                        expectedOutputShape,
+                                        expectedOutputValues);
+
+    armnnDelegateInterpreter.reset(nullptr);
+}
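+
+// A minimal usage sketch (illustrative only, not part of this helper): a test
+// concatenating two 2x2 int8 tensors along axis 0 could call
+//
+//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+//     std::vector<int32_t> inputShape { 2, 2 };
+//     std::vector<int32_t> outputShape { 4, 2 };
+//     std::vector<std::vector<int8_t>> inputValues { { 1, 2, 3, 4 }, { 5, 6, 7, 8 } };
+//     std::vector<int8_t> expectedOutputValues { 1, 2, 3, 4, 5, 6, 7, 8 };
+//     ConcatenationTest<int8_t>(tflite::BuiltinOperator_CONCATENATION,
+//                               ::tflite::TensorType_INT8, backends, inputShape,
+//                               outputShape, inputValues, expectedOutputValues, 0);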
+
+template <typename T>
+void MeanTest(tflite::BuiltinOperator controlOperatorCode,
+              tflite::TensorType tensorType,
+              std::vector<armnn::BackendId>& backends,
+              std::vector<int32_t>& input0Shape,
+              std::vector<int32_t>& input1Shape,
+              std::vector<int32_t>& expectedOutputShape,
+              std::vector<T>& input0Values,
+              std::vector<int32_t>& input1Values,
+              std::vector<T>& expectedOutputValues,
+              const bool keepDims,
+              float quantScale = 1.0f,
+              int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateMeanTfLiteModel(controlOperatorCode,
+                                                          tensorType,
+                                                          input0Shape,
+                                                          input1Shape,
+                                                          expectedOutputShape,
+                                                          input1Values,
+                                                          keepDims,
+                                                          quantScale,
+                                                          quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                        armnnDelegateInterpreter,
+                                        expectedOutputShape,
+                                        expectedOutputValues);
+
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/Convolution2dTest.cpp b/delegate/test/Convolution2dTest.cpp
new file mode 100644
index 0000000..3459e68
--- /dev/null
+++ b/delegate/test/Convolution2dTest.cpp
@@ -0,0 +1,489 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ConvolutionTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void Conv2DWithBiasesFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 5, 5, 1 };
+    std::vector<int32_t> filterShape { 1, 3, 3, 1 };
+    std::vector<int32_t> biasShape { 1 };
+    std::vector<int32_t> outputShape { 1, 3, 3, 1 };
+
+    static std::vector<float> inputValues =
+        {
+            1, 5, 2, 3, 5,
+            8, 7, 3, 6, 3,
+            3, 3, 9, 1, 9,
+            4, 1, 8, 1, 3,
+            6, 8, 1, 9, 2
+        };
+
+    std::vector<float> filterValues =
+        {
+            4, 5, 6,
+            0, 0, 0,
+            3, 2, 1
+        };
+
+    std::vector<float> biasValues = { 0 };
+
+    std::vector<float> expectedOutputValues =
+        {
+            23, 33, 24,
+            91, 99, 48,
+            26, 50, 19
+        };
+
+    tflite::Padding padding = tflite::Padding_SAME;
+
+    ConvolutionTest<float>(tflite::BuiltinOperator_CONV_2D,
+                                 ::tflite::TensorType_FLOAT32,
+                                 2, // strideX
+                                 2, // strideY
+                                 1, // dilationX
+                                 1, // dilationY
+                                 padding,
+                                 tflite::ActivationFunctionType_NONE,
+                                 backends,
+                                 inputShape,
+                                 filterShape,
+                                 outputShape,
+                                 inputValues,
+                                 filterValues,
+                                 expectedOutputValues,
+                                 biasShape,
+                                 biasValues);
+}
+
+void Conv2DWithBiasesInt8Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
+    std::vector<int32_t> biasShape { 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    static std::vector<int8_t> inputValues = { 1, 2, 3, 4 };
+
+    std::vector<int8_t> filterValues = { 2, 1, 0, 6 };
+
+    std::vector<int32_t> biasValues = { 10 };
+
+    std::vector<int8_t> expectedOutputValues =
+        {
+            (1 * 2 + 2 * 1 + 3 * 0 + 4 * 6 + 10) / 2, // 19
+            (2 * 2 + 0 * 1 + 4 * 0 + 0 * 6 + 10) / 2, // 7
+            (3 * 2 + 4 * 1 + 0 * 0 + 0 * 6 + 10) / 2, // 10
+            (4 * 2 + 0 * 1 + 0 * 0 + 0 * 6 + 10) / 2,  // 9
+        };
+
+    tflite::Padding padding = tflite::Padding_SAME;
+
+    ConvolutionTest<int8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
+                                            ::tflite::TensorType_INT8,
+                                            1, // strideX
+                                            1, // strideY
+                                            1, // dilationX
+                                            1, // dilationY
+                                            padding,
+                                            tflite::ActivationFunctionType_NONE,
+                                            backends,
+                                            inputShape,
+                                            filterShape,
+                                            outputShape,
+                                            inputValues,
+                                            filterValues,
+                                            expectedOutputValues,
+                                            biasShape,
+                                            biasValues);
+}
+
+void Conv2DWithBiasesReluUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
+    std::vector<int32_t> biasShape { 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    static std::vector<uint8_t> inputValues = { 1, 2, 4, 8 };
+
+    std::vector<uint8_t> filterValues = { 2, 1, 0, 6 };
+
+    std::vector<int32_t> biasValues = { 16 };
+
+    // Factors to consider:
+    // - the filter zero point is non-zero, hence the (x - fz)
+    // - the output scale is 2, hence the /2
+    // - the output zero point is non-zero, hence the + outZero
+    // - RELU cuts negative values and then we add the output zero point
+    uint8_t bias = 16;
+    uint8_t outZero = 20;
+    uint8_t fz = 4; // filter zero point
+
+    std::vector<uint8_t> expectedOutputValues =
+        {
+            std::max(outZero, static_cast<uint8_t>((1*(2-fz) + 2*(1-fz) + 4*(0-fz) + 8*(6-fz) + bias)/2 + outZero)),
+            std::max(outZero, static_cast<uint8_t>((2*(2-fz) + 0*(1-fz) + 8*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
+            std::max(outZero, static_cast<uint8_t>((4*(2-fz) + 8*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero)),
+            std::max(outZero, static_cast<uint8_t>((8*(2-fz) + 0*(1-fz) + 0*(0-fz) + 0*(6-fz) + bias)/2 + outZero))
+        };
+
+    tflite::Padding padding = tflite::Padding_SAME;
+
+    ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
+                                            ::tflite::TensorType_UINT8,
+                                            1, // strideX
+                                            1, // strideY
+                                            1, // dilationX
+                                            1, // dilationY
+                                            padding,
+                                            tflite::ActivationFunctionType_RELU,
+                                            backends,
+                                            inputShape,
+                                            filterShape,
+                                            outputShape,
+                                            inputValues,
+                                            filterValues,
+                                            expectedOutputValues,
+                                            biasShape,
+                                            biasValues,
+                                            {1.0f}, // biasScale
+                                            {0},    // biasOffset
+                                            {1.0f}, // filterScale
+                                            {4},    // filterOffsets
+                                            2, // output scale
+                                            20); // output offset
+}
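+
+// Illustrative sketch (an assumed helper, not used by the tests): the expected
+// values above follow the usual requantization rule, given that this test uses
+// an input scale of 1, an input zero point of 0 and a bias scale of 1.
+inline uint8_t QuantizedConvReluReference(int32_t accumulator, // sum of input * (filter - filterZeroPoint)
+                                          int32_t bias,
+                                          int32_t outputScale,
+                                          int32_t outputZeroPoint)
+{
+    int32_t requantized = (accumulator + bias) / outputScale + outputZeroPoint;
+    // In the quantized domain RELU clamps at the output zero point (real value 0).
+    return static_cast<uint8_t>(std::max(requantized, outputZeroPoint));
+}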
+
+void Conv2DWithBiasesRelu6Uint8Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
+    std::vector<int32_t> biasShape { 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    static std::vector<uint8_t> inputValues = { 1, 2, 4, 1 };
+
+    std::vector<uint8_t> filterValues = { 2, 1, 0, 6 };
+
+    std::vector<int32_t> biasValues = { 0 };
+
+    // Factors to consider:
+    // - the output scale is 2, hence the /2
+    // - RELU6 caps output values at +6
+    uint8_t relu6Cap = 6 / 2; // divide the cap of 6 by the output scale
+
+    std::vector<uint8_t> expectedOutputValues =
+        {
+            std::min(relu6Cap, static_cast<uint8_t>((1 * 2 + 2 * 1 + 4 * 0 + 1 * 6) / 2)),
+            std::min(relu6Cap, static_cast<uint8_t>((2 * 2 + 0 * 1 + 1 * 0 + 0 * 6) / 2)),
+            std::min(relu6Cap, static_cast<uint8_t>((4 * 2 + 1 * 1 + 0 * 0 + 0 * 6) / 2)),
+            std::min(relu6Cap, static_cast<uint8_t>((1 * 2 + 0 * 1 + 0 * 0 + 0 * 6) / 2))
+        };
+
+    tflite::Padding padding = tflite::Padding_SAME;
+
+    ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
+                                            ::tflite::TensorType_UINT8,
+                                            1, // strideX
+                                            1, // strideY
+                                            1, // dilationX
+                                            1, // dilationY
+                                            padding,
+                                            tflite::ActivationFunctionType_RELU6,
+                                            backends,
+                                            inputShape,
+                                            filterShape,
+                                            outputShape,
+                                            inputValues,
+                                            filterValues,
+                                            expectedOutputValues,
+                                            biasShape,
+                                            biasValues);
+}
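+
+// Illustrative note: in the quantized domain RELU6 clamps at
+// 6 / outputScale + outputZeroPoint; with an output scale of 2 and a zero point
+// of 0, the cap above works out to 3.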
+
+void Conv2DPerChannelInt8Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 1,4,4,2 };
+    std::vector<int32_t> filterShape { 4,2,2,2 };
+    std::vector<int32_t> biasShape   { 4 };
+    std::vector<int32_t> outputShape { 1,4,4,4 };
+
+    static std::vector<int8_t> inputValues =
+        {
+            -11, 40,-26, 11,-28,  8,  0, -8,
+            -10, 34, 47,  0,-33,-14, 28, 35,
+              6,-28,-26,  8, 13, 33,-31,-41,
+             31,-20,-31,-16,  8,-18,-44,  0
+        };
+
+    std::vector<float>  filterScales = { 1.858268, 2.0, 1.992126, 1.905512 };
+    int32_t filterQuantizationDim    = 0;
+    std::vector<int8_t> filterValues =
+        {
+             13,-44,  5,-14, 21,-45, 36,-25,
+            -42, -2, 24,-30,-31, 35, 43,-30,
+            -20, -5, 25, 17, 18, 20,  4,-46,
+            -49,  9, -3,-20, 46,  5,  7,-15
+        };
+
+    std::vector<int32_t> biasValues = { 0,0,0,0 };
+    std::vector<float>   biasScales = { 0.721445, 0.7764700055, 0.773414, 0.739787 };
+
+    std::vector<int8_t> expectedOutputValues =
+        {
+               -1,  9,  3, 5, 1, -1,  5,  9,
+                2,  7, -1, 2, 2,  4,  5,  6,
+                1,  1,  4, 4, 2,  0, -4, -3,
+                0,  6, 12, 6, 3,  0, -1, -2,
+                7, -4,  4, 4, 3,  6,  6,  2,
+                0, -3, -1, 4, 4,  8,  3,  1,
+                5,  0,  0, 1, 4,  7,  4,  6,
+                4,  0,  1, 2, 2,  7,  5,  7
+        };
+    float outputQuantScale  = 401.960785f;
+    int   outputQuantOffset = 3;
+    float inputQuantScale   = 0.388235f;
+    int   inputQuantOffset  = 1;
+
+    tflite::Padding padding = tflite::Padding_SAME;
+
+    ConvolutionTest<int8_t, int32_t>(tflite::BuiltinOperator_CONV_2D,
+                                            ::tflite::TensorType_INT8,
+                                            1, // strideX
+                                            1, // strideY
+                                            1, // dilationX
+                                            1, // dilationY
+                                            padding,
+                                            tflite::ActivationFunctionType_NONE,
+                                            backends,
+                                            inputShape,
+                                            filterShape,
+                                            outputShape,
+                                            inputValues,
+                                            filterValues,
+                                            expectedOutputValues,
+                                            biasShape,
+                                            biasValues,
+                                            biasScales,
+                                            {0,0,0,0},
+                                            filterScales,
+                                            {0,0,0,0},
+                                            outputQuantScale,
+                                            outputQuantOffset,
+                                            inputQuantScale,
+                                            inputQuantOffset,
+                                            1, // depth_multiplier is ignored for conv2d, so its value doesn't matter
+                                            filterQuantizationDim);
+}
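+
+// Illustrative note: with per-channel quantization the filter is quantized along
+// filterQuantizationDim (the output-channel axis here), and each bias scale is
+// expected to equal inputScale * filterScale for its channel, e.g.
+// 0.388235f * 1.858268f is approximately 0.721445f for channel 0.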
+
+TEST_SUITE("Convolution2dTest_CpuRefTests")
+{
+
+TEST_CASE ("Conv2DWithBiases_Fp32_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    Conv2DWithBiasesFp32Test(backends);
+}
+
+TEST_CASE ("Conv2DWithBiases_Int8_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    Conv2DWithBiasesInt8Test(backends);
+}
+
+TEST_CASE ("Conv2DPerChannel_Int8_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    Conv2DPerChannelInt8Test(backends);
+}
+
+} //End of TEST_SUITE("Convolution2dTest_CpuRef")
+
+TEST_SUITE("Convolution2dTest_CpuAccTests")
+{
+
+TEST_CASE ("Conv2DWithBiases_Fp32_CpuAcc_Test")
+{
+std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+Conv2DWithBiasesFp32Test(backends);
+}
+
+TEST_CASE ("Conv2DWithBiases_Int8_CpuAcc_Test")
+{
+std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+Conv2DWithBiasesInt8Test(backends);
+}
+
+TEST_CASE ("Conv2DPerChannel_Int8_CpuAcc_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    Conv2DPerChannelInt8Test(backends);
+}
+
+} //End of TEST_SUITE("Convolution2dTest_CpuAcc")
+
+TEST_SUITE("Convolution2dTest_GpuAccTests")
+{
+
+TEST_CASE ("Conv2DWithBiases_Fp32_GpuAcc_Test")
+{
+std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+Conv2DWithBiasesFp32Test(backends);
+}
+
+TEST_CASE ("Conv2DWithBiases_Int8_GpuAcc_Test")
+{
+std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+Conv2DWithBiasesInt8Test(backends);
+}
+
+TEST_CASE ("Conv2DPerChannel_Int8_GpuAcc_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    Conv2DPerChannelInt8Test(backends);
+}
+
+} //End of TEST_SUITE("Convolution2dTest_GpuAcc")
+
+void TransposeConvInt8Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> transposeTensorShape { 4 };
+    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
+    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> outputShape { 1, 3, 3, 1 };
+
+    std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
+    static std::vector<int8_t> inputValues = { 1, 2, 3, 4 };
+    std::vector<int8_t> filterValues = { 0, 1, 2, 4 };
+    std::vector<int8_t> expectedOutputValues =
+        {
+            0, 1,  2,
+            2, 11, 12,
+            6, 20, 16
+        };
+
+    tflite::Padding padding = tflite::Padding_VALID;
+    TransposeConvTest<int8_t>(backends,
+                             ::tflite::TensorType_INT8,
+                             1, // strideX
+                             1, // strideY
+                             padding,
+                             transposeTensorShape,
+                             filterShape,
+                             inputShape,
+                             outputShape,
+                             transposeData,
+                             filterValues,
+                             inputValues,
+                             expectedOutputValues);
+}
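+
+// Note: for TRANSPOSE_CONV the first operator input is a constant shape tensor
+// holding the desired output shape, which is why transposeData above matches
+// outputShape.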
+
+void TransposeConvFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> transposeTensorShape { 4 };
+    std::vector<int32_t> filterShape { 1, 2, 2, 1 };
+    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> outputShape { 1, 3, 3, 1 };
+
+    std::vector<int32_t> transposeData = { 1, 3, 3, 1 };
+    static std::vector<float> inputValues = { 1, 2, 3, 4 };
+    std::vector<float> filterValues = { 0, 1, 2, 4 };
+    std::vector<float> expectedOutputValues =
+        {
+            0, 1,  2,
+            2, 11, 12,
+            6, 20, 16
+        };
+
+    tflite::Padding padding = tflite::Padding_VALID;
+    TransposeConvTest<float>(backends,
+                             ::tflite::TensorType_FLOAT32,
+                             1, // strideX
+                             1, // strideY
+                             padding,
+                             transposeTensorShape,
+                             filterShape,
+                             inputShape,
+                             outputShape,
+                             transposeData,
+                             filterValues,
+                             inputValues,
+                             expectedOutputValues);
+}
+
+TEST_SUITE("TransposeConv_CpuRef_Test")
+{
+
+TEST_CASE ("TransposeConv_CpuRef_Fp32_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    TransposeConvFp32Test(backends);
+}
+
+TEST_CASE ("TransposeConv_CpuRef_Int8_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    TransposeConvInt8Test(backends);
+}
+
+} // End of TEST_SUITE(TransposeConv_CpuRef_Test)
+
+TEST_SUITE("TransposeConv_CpuAcc_Test")
+{
+
+TEST_CASE ("TransposeConv_CpuAcc_Fp32_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    TransposeConvFp32Test(backends);
+}
+
+TEST_CASE ("TransposeConv_CpuAcc_Int8_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    TransposeConvInt8Test(backends);
+}
+
+} // End of TEST_SUITE(TransposeConv_CpuAcc_Test)
+
+TEST_SUITE("TransposeConv_GpuAcc_Test")
+{
+
+TEST_CASE ("TransposeConv_GpuAcc_Fp32_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    TransposeConvFp32Test(backends);
+}
+
+TEST_CASE ("TransposeConv_GpuAcc_Int8_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    TransposeConvInt8Test(backends);
+}
+
+} // End of TEST_SUITE(TransposeConv_GpuAcc_Test)
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/Convolution3dTest.cpp b/delegate/test/Convolution3dTest.cpp
new file mode 100644
index 0000000..fe987be
--- /dev/null
+++ b/delegate/test/Convolution3dTest.cpp
@@ -0,0 +1,318 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ConvolutionTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+// Conv3d currently only supports Float32 inputs, filters, biases and outputs in TFLite.
+// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
+#if defined(ARMNN_POST_TFLITE_2_5)
+
+// Create a vector of values from 0 to size - 1, each divided by divisor to create smaller floating point values.
+template <typename T>
+std::vector<T> CreateFloatData(int32_t size, float divisor)
+{
+    std::vector<T> data;
+    for (int32_t i = 0; i < size; ++i)
+    {
+        float value = static_cast<float>(i);
+        data.push_back(static_cast<T>(value / divisor));
+    }
+    return data;
+}
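+
+// For example (illustrative): CreateFloatData<float>(4, 2.0f) returns
+// { 0.0f, 0.5f, 1.0f, 1.5f }.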
+
+void Conv3DWithBiasesSimpleWithPaddingFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 2, 2, 2, 1 };
+    std::vector<int32_t> filterShape { 2, 2, 2, 1, 1 };
+    std::vector<int32_t> biasShape { 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 2, 1 };
+
+    static std::vector<float> inputValues =
+    {
+        1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f
+    };
+
+    std::vector<float> filterValues =
+    {
+        2.f,1.f, 1.f,0.f, 0.f,1.f, 1.f,1.f
+    };
+
+    std::vector<float> biasValues = { 5.f };
+
+    std::vector<float> expectedOutputValues =
+    {
+       33.f, 21.f, 23.f, 13.f, 28.f, 25.f, 27.f, 21.f
+    };
+
+    Convolution3dTest<float>(tflite::BuiltinOperator_CONV_3D,
+                             ::tflite::TensorType_FLOAT32,
+                             { 1, 1, 1 }, // strideX, strideY, strideZ
+                             { 1, 1, 1 }, // dilationX, dilationY, dilationZ
+                             tflite::Padding_SAME,
+                             tflite::ActivationFunctionType_NONE,
+                             backends,
+                             inputShape,
+                             filterShape,
+                             outputShape,
+                             inputValues,
+                             filterValues,
+                             expectedOutputValues,
+                             biasShape,
+                             biasValues);
+}
+
+void Conv3DWithBiasesStridesFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 1, 3, 10, 10, 1 };
+    std::vector<int32_t> filterShape { 3, 5, 5, 1, 1 };
+    std::vector<int32_t> biasShape { 1 };
+    std::vector<int32_t> outputShape { 1, 1, 3, 3, 1 };
+
+    std::vector<float> inputValues = CreateFloatData<float>(300, 1.0f);
+
+    std::vector<float> filterValues =
+    {
+        1.f, 1.f, 1.f, 1.f, 1.f,
+        1.f, 1.f, 1.f, 1.f, 1.f,
+        1.f, 1.f, 1.f, 1.f, 1.f,
+        1.f, 1.f, 1.f, 1.f, 1.f,
+        1.f, 1.f, 1.f, 1.f, 1.f,
+
+        0.f, 0.f, 0.f, 0.f, 0.f,
+        0.f, 0.f, 0.f, 0.f, 0.f,
+        0.f, 0.f, 0.f, 0.f, 0.f,
+        0.f, 0.f, 0.f, 0.f, 0.f,
+        0.f, 0.f, 0.f, 0.f, 0.f,
+
+        2.f, 2.f, 2.f, 2.f, 2.f,
+        2.f, 2.f, 2.f, 2.f, 2.f,
+        2.f, 2.f, 2.f, 2.f, 2.f,
+        2.f, 2.f, 2.f, 2.f, 2.f,
+        2.f, 2.f, 2.f, 2.f, 2.f
+    };
+
+    std::vector<float> biasValues = { 10.f };
+
+    std::vector<float> expectedOutputValues =
+    {
+        11660.f, 11810.f, 11960.f,
+
+        13160.f, 13310.f, 13460.f,
+
+        14660.f, 14810.f, 14960.f
+    };
+
+    Convolution3dTest<float>(tflite::BuiltinOperator_CONV_3D,
+                             ::tflite::TensorType_FLOAT32,
+                             { 2, 2, 2 }, // strideX, strideY, strideZ
+                             { 1, 1, 1 }, // dilationX, dilationY, dilationZ
+                             tflite::Padding_VALID,
+                             tflite::ActivationFunctionType_NONE,
+                             backends,
+                             inputShape,
+                             filterShape,
+                             outputShape,
+                             inputValues,
+                             filterValues,
+                             expectedOutputValues,
+                             biasShape,
+                             biasValues);
+}
+
+void Conv3DWithBiasesDilationFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 1, 5, 5, 5, 2 };
+    std::vector<int32_t> filterShape { 2, 2, 2, 2, 2 };
+    std::vector<int32_t> biasShape { 2 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 2, 2 };
+
+    std::vector<float> inputValues = CreateFloatData<float>(250, 1.0f);
+
+    std::vector<float> filterValues =
+    {
+        -1.f, -1.f,  -1.f, -1.f,  -1.f, -1.f,  -1.f, -1.f,  -1.f, -1.f,  -1.f,  1.f,   1.f,  1.f,  -1.f, -1.f,
+         1.f,  1.f,  -1.f,  1.f,  -1.f,  1.f,  -1.f,  1.f,  -1.f, -1.f,  -1.f,  1.f,  -1.f,  1.f,  -1.f,  1.f,
+    };
+
+    std::vector<float> biasValues = { 0.f, 2.f };
+
+    // Since the dilation rate is 3, the 2x2x2 kernel is dilated to an effective 4x4x4,
+    // therefore the output will be 2x2x2.
+    std::vector<float> expectedOutputValues =
+    {
+        -1124.f, 976.f,
+        -1148.f, 980.f,
+
+        -1244.f, 996.f,
+        -1268.f, 1000.f,
+
+        -1724.f, 1076.f,
+        -1748.f, 1080.f,
+
+        -1844.f, 1096.f,
+        -1868.f, 1100.f
+    };
+
+    Convolution3dTest<float>(tflite::BuiltinOperator_CONV_3D,
+                             ::tflite::TensorType_FLOAT32,
+                             { 1, 1, 1 }, // strideX, strideY, strideZ
+                             { 3, 3, 3 }, // dilationX, dilationY, dilationZ
+                             tflite::Padding_VALID,
+                             tflite::ActivationFunctionType_NONE,
+                             backends,
+                             inputShape,
+                             filterShape,
+                             outputShape,
+                             inputValues,
+                             filterValues,
+                             expectedOutputValues,
+                             biasShape,
+                             biasValues);
+}
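+
+// Illustrative check of the dilation comment above: the effective kernel extent
+// is k + (k - 1) * (d - 1) = 2 + 1 * 2 = 4 per spatial dimension, so a 5x5x5
+// input with VALID padding yields 5 - 4 + 1 = 2 outputs per dimension.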
+
+void Conv3DFp32SmallTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 1, 3, 10, 10, 1 };
+    std::vector<int32_t> filterShape { 3, 3, 3, 1, 1 };
+    std::vector<int32_t> biasShape { 1 };
+    std::vector<int32_t> outputShape { 1, 1, 4, 4, 1 };
+
+    std::vector<float> inputValues = CreateFloatData<float>(300, 100.0f);
+
+    std::vector<float> filterValues =
+    {
+         0.125977f,  0.150391f,  0.101562f,
+         0.0585938f, 0.0864258f, 0.043457f,
+         0.034668f,  0.0322266f, 0.0385742f,
+
+         0.125977f,  0.150391f, -0.101562f,
+        -0.0585938f,-0.0864258f,-0.043457f,
+        -0.0104630f, 0.0154114f, 0.0013768f,
+
+         0.0344238f, 0.035644f,  0.0495605f,
+         0.0683594f, 0.099121f, -0.0461426f,
+        -0.0996094f,-0.126953f, -0.043457f,
+    };
+
+    std::vector<float> biasValues = { 0 };
+
+    std::vector<float> expectedOutputValues =
+    {
+        -0.08156067f, -0.06891209f, -0.05589598f, -0.04310101f,
+         0.04584253f,  0.05855697f,  0.07129729f,  0.08325434f,
+         0.17304349f,  0.18521416f,  0.19818866f,  0.21096253f,
+         0.29965734f,  0.312698f,    0.32547557f,  0.33818722f
+    };
+
+    Convolution3dTest<float>(tflite::BuiltinOperator_CONV_3D,
+                             ::tflite::TensorType_FLOAT32,
+                             { 2, 2, 2 }, // strideX, strideY, strideZ
+                             { 1, 1, 1 }, // dilationX, dilationY, dilationZ
+                             tflite::Padding_VALID,
+                             tflite::ActivationFunctionType_NONE,
+                             backends,
+                             inputShape,
+                             filterShape,
+                             outputShape,
+                             inputValues,
+                             filterValues,
+                             expectedOutputValues,
+                             biasShape,
+                             biasValues);
+}
+
+TEST_SUITE("Convolution3dTest_CpuRefTests")
+{
+
+TEST_CASE ("Conv3DWithBiasesSimpleWithPadding_Fp32_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    Conv3DWithBiasesSimpleWithPaddingFp32Test(backends);
+}
+
+TEST_CASE ("Conv3DWithBiasesStrides_Fp32_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    Conv3DWithBiasesStridesFp32Test(backends);
+}
+
+TEST_CASE ("Conv3DWithBiasesDilation_Fp32_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    Conv3DWithBiasesDilationFp32Test(backends);
+}
+
+TEST_CASE ("Conv3DFp32Small_Fp32_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    Conv3DFp32SmallTest(backends);
+}
+
+} //End of TEST_SUITE("Convolution3dTest_CpuRefTests")
+
+TEST_SUITE("Convolution3dTest_CpuAccTests")
+{
+
+TEST_CASE ("Conv3DWithBiasesSimpleWithPadding_Fp32_CpuAcc_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    Conv3DWithBiasesSimpleWithPaddingFp32Test(backends);
+}
+
+TEST_CASE ("Conv3DWithBiasesStrides_Fp32_CpuAcc_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    Conv3DWithBiasesStridesFp32Test(backends);
+}
+
+TEST_CASE ("Conv3DFp32Small_Fp32_CpuAcc_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    Conv3DFp32SmallTest(backends);
+}
+
+} //End of TEST_SUITE("Convolution3dTest_CpuAccTests")
+
+TEST_SUITE("Convolution3dTest_GpuAccTests")
+{
+
+TEST_CASE ("Conv3DWithBiasesSimpleWithPadding_Fp32_GpuAcc_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    Conv3DWithBiasesSimpleWithPaddingFp32Test(backends);
+}
+
+TEST_CASE ("Conv3DWithBiasesStrides_Fp32_GpuAcc_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    Conv3DWithBiasesStridesFp32Test(backends);
+}
+
+TEST_CASE ("Conv3DFp32Small_Fp32_GpuAcc_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    Conv3DFp32SmallTest(backends);
+}
+
+} //End of TEST_SUITE("Convolution3dTest_GpuAccTests")
+
+#endif
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ConvolutionTestHelper.hpp b/delegate/test/ConvolutionTestHelper.hpp
new file mode 100644
index 0000000..2e211b2
--- /dev/null
+++ b/delegate/test/ConvolutionTestHelper.hpp
@@ -0,0 +1,784 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename T, typename B = float>
+std::vector<char> CreateConv2dTfLiteModel(tflite::BuiltinOperator convolutionOperatorCode,
+                                          tflite::TensorType tensorType,
+                                          uint32_t strideX,
+                                          uint32_t strideY,
+                                          uint32_t dilationX,
+                                          uint32_t dilationY,
+                                          tflite::Padding padding,
+                                          tflite::ActivationFunctionType fused_activation_function,
+                                          const std::vector <int32_t>& inputTensorShape,
+                                          const std::vector <int32_t>& filterTensorShape,
+                                          const std::vector <int32_t>& biasTensorShape,
+                                          const std::vector <int32_t>& outputTensorShape,
+                                          const std::vector <T>& filterData,
+                                          const std::vector <B>& biasData,
+                                          const std::vector<float> biasScales = {1.0f},
+                                          const std::vector<int64_t> biasOffsets = {0},
+                                          const std::vector<float> filterScales = {1.0f},
+                                          const std::vector<int64_t> filterOffsets = {0},
+                                          float outputQuantScale = 2.0f,
+                                          int outputQuantOffset = 0,
+                                          float quantScale = 1.0f,
+                                          int quantOffset = 0,
+                                          int32_t depth_multiplier = 1,
+                                          int32_t filterQuantizationDim = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder);
+    buffers[1] = CreateBuffer(flatBufferBuilder);
+    buffers[2] = CreateBuffer(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
+                                                             sizeof(T) * filterData.size()));
+
+    buffers[3] = CreateBuffer(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
+                                                             sizeof(B) * biasData.size()));
+    buffers[4] = CreateBuffer(flatBufferBuilder);
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+    auto outputQuantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
+
+    auto filterQuantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>(filterScales),
+                                         flatBufferBuilder.CreateVector<int64_t>(filterOffsets),
+                                         tflite::QuantizationDetails_NONE,
+                                         0,
+                                         filterQuantizationDim);
+
+    auto biasQuantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>(biasScales),
+                                         flatBufferBuilder.CreateVector<int64_t>(biasOffsets));
+
+    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
+                                                                      filterTensorShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("filter"),
+                              filterQuantizationParameters);
+
+    auto biasTensorType = ::tflite::TensorType_FLOAT32;
+    if (tensorType == ::tflite::TensorType_INT8 || tensorType == ::tflite::TensorType_UINT8)
+    {
+        biasTensorType = ::tflite::TensorType_INT32;
+    }
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
+                              biasTensorType,
+                              3,
+                              flatBufferBuilder.CreateString("bias"),
+                              biasQuantizationParameters);
+    tensors[3] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              4,
+                              flatBufferBuilder.CreateString("output"),
+                              outputQuantizationParameters);
+
+    flatbuffers::Offset<void> operatorBuiltinOptions;
+    tflite::BuiltinOptions operatorBuiltinOptionsType;
+
+    if(convolutionOperatorCode == tflite::BuiltinOperator_DEPTHWISE_CONV_2D)
+    {
+        operatorBuiltinOptionsType = tflite::BuiltinOptions_DepthwiseConv2DOptions;
+        operatorBuiltinOptions = CreateDepthwiseConv2DOptions(flatBufferBuilder,
+                                                              padding,
+                                                              strideX,
+                                                              strideY,
+                                                              depth_multiplier,
+                                                              fused_activation_function,
+                                                              dilationX,
+                                                              dilationY).Union();
+    }
+    if(convolutionOperatorCode == tflite::BuiltinOperator_CONV_2D)
+    {
+        operatorBuiltinOptionsType = tflite::BuiltinOptions_Conv2DOptions;
+        operatorBuiltinOptions = CreateConv2DOptions(flatBufferBuilder,
+                                                     padding,
+                                                     strideX,
+                                                     strideY,
+                                                     fused_activation_function,
+                                                     dilationX,
+                                                     dilationY).Union();
+    }
+
+    // Create operator
+    const std::vector<int> operatorInputs{0, 1, 2};
+    const std::vector<int> operatorOutputs{3};
+    flatbuffers::Offset <Operator> convolutionOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{0, 1, 2};
+    const std::vector<int> subgraphOutputs{3};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&convolutionOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Convolution2d Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, convolutionOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
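+
+// Note on the wiring above: tensor 0 (input) uses buffer 1, tensor 1 (filter)
+// uses constant buffer 2, tensor 2 (bias) uses constant buffer 3 and tensor 3
+// (output) uses buffer 4; buffer 0 is the conventional empty sentinel buffer.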
+
+template <typename T, typename B = float>
+void ConvolutionTest(tflite::BuiltinOperator convolutionOperatorCode,
+                     tflite::TensorType tensorType,
+                     uint32_t strideX,
+                     uint32_t strideY,
+                     uint32_t dilationX,
+                     uint32_t dilationY,
+                     tflite::Padding padding,
+                     tflite::ActivationFunctionType fused_activation_function,
+                     std::vector<armnn::BackendId>& backends,
+                     std::vector<int32_t>& inputShape,
+                     std::vector<int32_t>& filterShape,
+                     std::vector<int32_t>& outputShape,
+                     std::vector<T>& inputValues,
+                     std::vector<T>& filterValues,
+                     std::vector<T>& expectedOutputValues,
+                     const std::vector<int32_t>& biasShape = {},
+                     const std::vector<B>& biasValues = {},
+                     const std::vector<float> biasScales = {1.0f},
+                     const std::vector<int64_t> biasOffsets = {0},
+                     const std::vector<float> filterScales = {1.0f},
+                     const std::vector<int64_t> filterOffsets = {0},
+                     float outputQuantScale = 2.0f,
+                     int outputQuantOffset = 0,
+                     float quantScale = 1.0f,
+                     int quantOffset = 0,
+                     int32_t depth_multiplier = 1,
+                     int32_t filterQuantizationDim = 3)
+{
+    using namespace tflite;
+
+    std::vector<char> modelBuffer;
+
+    modelBuffer = CreateConv2dTfLiteModel(convolutionOperatorCode,
+                                          tensorType,
+                                          strideX,
+                                          strideY,
+                                          dilationX,
+                                          dilationY,
+                                          padding,
+                                          fused_activation_function,
+                                          inputShape,
+                                          filterShape,
+                                          biasShape,
+                                          outputShape,
+                                          filterValues,
+                                          biasValues,
+                                          biasScales,
+                                          biasOffsets,
+                                          filterScales,
+                                          filterOffsets,
+                                          outputQuantScale,
+                                          outputQuantOffset,
+                                          quantScale,
+                                          quantOffset,
+                                          depth_multiplier,
+                                          filterQuantizationDim);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
+    for (size_t i = 0; i < expectedOutputValues.size(); i++)
+    {
+        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+        CHECK(doctest::Approx(tfLiteDelegateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]);
+        CHECK(doctest::Approx(armnnDelegateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]);
+    }
+}
+
+// Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
+#if defined(ARMNN_POST_TFLITE_2_5)
+template <typename T, typename B = float>
+std::vector<char> CreateConv3dTfLiteModel(tflite::BuiltinOperator convolutionOperatorCode,
+                                          tflite::TensorType tensorType,
+                                          std::vector<uint32_t> strides,
+                                          std::vector<uint32_t> dilation,
+                                          tflite::Padding padding,
+                                          tflite::ActivationFunctionType fused_activation_function,
+                                          const std::vector<int32_t>& inputTensorShape,
+                                          const std::vector<int32_t>& filterTensorShape,
+                                          const std::vector<int32_t>& biasTensorShape,
+                                          const std::vector<int32_t>& outputTensorShape,
+                                          const std::vector<T>& filterData,
+                                          const std::vector<B>& biasData,
+                                          const std::vector<float> biasScales = {1.0f},
+                                          const std::vector<int64_t> biasOffsets = {0},
+                                          const std::vector<float> filterScales = {1.0f},
+                                          const std::vector<int64_t> filterOffsets = {0},
+                                          float outputQuantScale = 2.0f,
+                                          int outputQuantOffset = 0,
+                                          float quantScale = 1.0f,
+                                          int quantOffset = 0,
+                                          int32_t depth_multiplier = 1,
+                                          int32_t filterQuantizationDim = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder);
+    buffers[1] = CreateBuffer(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
+                                                             sizeof(T) * filterData.size()));
+
+    buffers[2] = CreateBuffer(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
+                                                             sizeof(B) * biasData.size()));
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+    auto outputQuantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
+
+    auto filterQuantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>(filterScales),
+                                         flatBufferBuilder.CreateVector<int64_t>(filterOffsets),
+                                         tflite::QuantizationDetails_NONE,
+                                         0,
+                                         filterQuantizationDim);
+
+    auto biasQuantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>(biasScales),
+                                         flatBufferBuilder.CreateVector<int64_t>(biasOffsets));
+
+    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
+                                                                      filterTensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("filter"),
+                              filterQuantizationParameters);
+
+    auto biasTensorType = ::tflite::TensorType_FLOAT32;
+    if (tensorType == ::tflite::TensorType_INT8 || tensorType == ::tflite::TensorType_UINT8)
+    {
+        biasTensorType = ::tflite::TensorType_INT32;
+    }
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
+                              biasTensorType,
+                              2,
+                              flatBufferBuilder.CreateString("bias"),
+                              biasQuantizationParameters);
+    tensors[3] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              outputQuantizationParameters);
+
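+    // Conv3DOptions takes strides and dilations in (depth, width, height) order, whereas
+    // the test passes them as { width, height, depth } vectors, hence the reordering below.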
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_Conv3DOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateConv3DOptions(flatBufferBuilder,
+                                                                           padding,
+                                                                           strides[2], // Depth
+                                                                           strides[0], // Width
+                                                                           strides[1], // Height
+                                                                           fused_activation_function,
+                                                                           dilation[2],
+                                                                           dilation[0],
+                                                                           dilation[1]).Union();
+
+    // Create operator
+    const std::vector<int> operatorInputs{0, 1, 2};
+    const std::vector<int> operatorOutputs{3};
+    flatbuffers::Offset <Operator> convolutionOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{0, 1, 2};
+    const std::vector<int> subgraphOutputs{3};
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&convolutionOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: Convolution 3d Operator Model");
+
+    // Operator codes above 127 do not fit in the deprecated int8 builtin-code field, so the
+    // enum value is passed as the fifth parameter of CreateOperatorCode rather than the
+    // second one used in other tests.
+    flatbuffers::Offset <OperatorCode> operatorCode =
+            CreateOperatorCode(flatBufferBuilder, 0, 0, 1, tflite::BuiltinOperator_CONV_3D);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
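+// Builds the CONV_3D model above and runs it through two interpreters, a reference TfLite
+// interpreter and one whose graph is handed to the ArmNN delegate, then checks both results
+// against the expected output. T is the tensor element type and B the bias element type.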
+template <typename T, typename B = float>
+void Convolution3dTest(tflite::BuiltinOperator convolutionOperatorCode,
+                       tflite::TensorType tensorType,
+                       std::vector<uint32_t> strides,
+                       std::vector<uint32_t> dilation,
+                       tflite::Padding padding,
+                       tflite::ActivationFunctionType fused_activation_function,
+                       std::vector<armnn::BackendId>& backends,
+                       std::vector<int32_t>& inputShape,
+                       std::vector<int32_t>& filterShape,
+                       std::vector<int32_t>& outputShape,
+                       std::vector<T>& inputValues,
+                       std::vector<T>& filterValues,
+                       std::vector<T>& expectedOutputValues,
+                       const std::vector<int32_t>& biasShape = {},
+                       const std::vector<B>& biasValues = {},
+                       const std::vector<float> biasScales = {1.0f},
+                       const std::vector<int64_t> biasOffsets = {0},
+                       const std::vector<float> filterScales = {1.0f},
+                       const std::vector<int64_t> filterOffsets = {0},
+                       float outputQuantScale = 2.0f,
+                       int outputQuantOffset = 0,
+                       float quantScale = 1.0f,
+                       int quantOffset = 0,
+                       int32_t depth_multiplier = 1,
+                       int32_t filterQuantizationDim = 3)
+{
+    using namespace tflite;
+
+    std::vector<char> modelBuffer;
+    modelBuffer = CreateConv3dTfLiteModel(convolutionOperatorCode,
+                                          tensorType,
+                                          strides,
+                                          dilation,
+                                          padding,
+                                          fused_activation_function,
+                                          inputShape,
+                                          filterShape,
+                                          biasShape,
+                                          outputShape,
+                                          filterValues,
+                                          biasValues,
+                                          biasScales,
+                                          biasOffsets,
+                                          filterScales,
+                                          filterOffsets,
+                                          outputQuantScale,
+                                          outputQuantOffset,
+                                          quantScale,
+                                          quantOffset,
+                                          depth_multiplier,
+                                          filterQuantizationDim);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+
+    armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size(), 1);
+    armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData, expectedOutputValues.size(), 1);
+    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size(), 1);
+}
+#endif
+
+template <typename T>
+std::vector<char> CreateTransposeConvTfLiteModel(tflite::TensorType tensorType,
+                                                 uint32_t strideX,
+                                                 uint32_t strideY,
+                                                 tflite::Padding padding,
+                                                 const std::vector <int32_t>& transposeTensorShape,
+                                                 const std::vector <int32_t>& filterTensorShape,
+                                                 const std::vector <int32_t>& inputTensorShape,
+                                                 const std::vector <int32_t>& outputTensorShape,
+                                                 const std::vector <int32_t>& transposeData,
+                                                 const std::vector <T>& filterData,
+                                                 float filterScale = 1.0f,
+                                                 int filterOffset = 0,
+                                                 float outputQuantScale = 2.0f,
+                                                 int outputQuantOffset = 0,
+                                                 float quantScale = 1.0f,
+                                                 int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
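+    // Buffer 0 is the conventional empty buffer; buffer 1 holds the constant output-shape
+    // tensor data (transposeData) and buffer 2 the filter weights.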
+    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder);
+    buffers[1] = CreateBuffer(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(transposeData.data()),
+                                                             sizeof(int32_t) * transposeData.size()));
+    buffers[2] = CreateBuffer(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
+                                                             sizeof(T) * filterData.size()));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+    auto outputQuantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
+    auto filterQuantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ filterScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ filterOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(transposeTensorShape.data(),
+                              transposeTensorShape.size()),
+                              tflite::TensorType_INT32,
+                              1);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
+                              filterTensorShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("filter"),
+                              filterQuantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                              inputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+    tensors[3] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                              outputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              outputQuantizationParameters);
+
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_TransposeConvOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions =
+        CreateTransposeConvOptions(flatBufferBuilder, padding, strideX, strideY).Union();
+
+    // Create operator
+    const std::vector<int> operatorInputs{0, 1, 2};
+    const std::vector<int> operatorOutputs{3};
+    flatbuffers::Offset <Operator> convolutionOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{0, 1, 2};
+    const std::vector<int> subgraphOutputs{3};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&convolutionOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: TransposeConv Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode =
+        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_TRANSPOSE_CONV);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void TransposeConvTest(std::vector<armnn::BackendId>& backends,
+                       tflite::TensorType tensorType,
+                       uint32_t strideX,
+                       uint32_t strideY,
+                       tflite::Padding padding,
+                       const std::vector <int32_t>& transposeTensorShape,
+                       const std::vector <int32_t>& filterTensorShape,
+                       const std::vector <int32_t>& inputTensorShape,
+                       const std::vector <int32_t>& outputTensorShape,
+                       const std::vector <int32_t>& transposeData,
+                       const std::vector <T>& filterData,
+                       std::vector<T>& inputValues,
+                       std::vector<T>& expectedOutputValues,
+                       float filterScale = 1.0f,
+                       int filterOffset = 0,
+                       float outputQuantScale = 1.0f,
+                       int outputQuantOffset = 0,
+                       float quantScale = 1.0f,
+                       int quantOffset = 0)
+{
+    using namespace tflite;
+
+    std::vector<char> modelBuffer;
+    modelBuffer = CreateTransposeConvTfLiteModel<T>(tensorType,
+                                                    strideX,
+                                                    strideY,
+                                                    padding,
+                                                    transposeTensorShape,
+                                                    filterTensorShape,
+                                                    inputTensorShape,
+                                                    outputTensorShape,
+                                                    transposeData,
+                                                    filterData,
+                                                    filterScale,
+                                                    filterOffset,
+                                                    outputQuantScale,
+                                                    outputQuantOffset,
+                                                    quantScale,
+                                                    quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
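+    // In this model, subgraph inputs 0 and 1 are the constant output-shape and filter
+    // tensors, so the image data is written to input index 2.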
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[2];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[2];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
+    for (size_t i = 0; i < expectedOutputValues.size(); i++)
+    {
+        CHECK(armnnDelegateOutputData[i] == expectedOutputValues[i]);
+        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
+        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+    }
+}
+
+} // anonymous namespace
+
diff --git a/delegate/test/DelegateOptionsTest.cpp b/delegate/test/DelegateOptionsTest.cpp
new file mode 100644
index 0000000..ecd8c73
--- /dev/null
+++ b/delegate/test/DelegateOptionsTest.cpp
@@ -0,0 +1,372 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "DelegateOptionsTestHelper.hpp"
+#include <common/include/ProfilingGuid.hpp>
+#include <armnnUtils/Filesystem.hpp>
+
+namespace armnnDelegate
+{
+
+TEST_SUITE("DelegateOptions")
+{
+
+TEST_CASE ("ArmnnDelegateOptimizerOptionsReduceFp32ToFp16")
+{
+    std::stringstream ss;
+    {
+        StreamRedirector redirect(std::cout, ss.rdbuf());
+
+        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+        std::vector<float> inputData = { 1, 2, 3, 4 };
+        std::vector<float> divData = { 2, 2, 3, 4 };
+        std::vector<float> expectedResult = { 1, 2, 2, 2 };
+
+        // Enable ReduceFp32ToFp16 and Debug, so the inserted convert layers are logged
+        armnn::OptimizerOptions optimizerOptions(true, true, false, false);
+        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+
+        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
+                                  backends,
+                                  tensorShape,
+                                  inputData,
+                                  inputData,
+                                  divData,
+                                  expectedResult,
+                                  delegateOptions);
+    }
+    // The ReduceFp32ToFp16 optimization inserted convert layers, which the debug output names
+    CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
+    CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
+}
+
+TEST_CASE ("ArmnnDelegateOptimizerOptionsDebug")
+{
+    std::stringstream ss;
+    {
+        StreamRedirector redirect(std::cout, ss.rdbuf());
+
+        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+        std::vector<float> inputData = { 1, 2, 3, 4 };
+        std::vector<float> divData = { 2, 2, 3, 4 };
+        std::vector<float> expectedResult = { 1, 2, 2, 2 };
+
+        // Enable Debug
+        armnn::OptimizerOptions optimizerOptions(false, true, false, false);
+        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+
+        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
+                                  backends,
+                                  tensorShape,
+                                  inputData,
+                                  inputData,
+                                  divData,
+                                  expectedResult,
+                                  delegateOptions);
+    }
+    // The Debug option printed per-layer tensor data.
+    CHECK(ss.str().find("layerGuid") != std::string::npos);
+    CHECK(ss.str().find("layerName") != std::string::npos);
+    CHECK(ss.str().find("outputSlot") != std::string::npos);
+    CHECK(ss.str().find("shape") != std::string::npos);
+    CHECK(ss.str().find("data") != std::string::npos);
+}
+
+TEST_CASE ("ArmnnDelegateOptimizerOptionsDebugFunction")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+    std::vector<float> inputData = { 1, 2, 3, 4 };
+    std::vector<float> divData = { 2, 2, 3, 4 };
+    std::vector<float> expectedResult = { 1, 2, 2, 2 };
+
+    // Enable debug with debug callback function
+    armnn::OptimizerOptions optimizerOptions(false, true, false, false);
+    bool callback = false;
+    auto mockCallback = [&](LayerGuid guid, unsigned int slotIndex, armnn::ITensorHandle* tensor)
+    {
+        armnn::IgnoreUnused(guid);
+        armnn::IgnoreUnused(slotIndex);
+        armnn::IgnoreUnused(tensor);
+        callback = true;
+    };
+
+    armnn::INetworkProperties networkProperties(false, armnn::MemorySource::Undefined, armnn::MemorySource::Undefined);
+    armnnDelegate::DelegateOptions delegateOptions(backends,
+                                                   optimizerOptions,
+                                                   armnn::EmptyOptional(),
+                                                   armnn::Optional<armnn::DebugCallbackFunction>(mockCallback));
+
+    CHECK(!callback);
+
+    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
+                              backends,
+                              tensorShape,
+                              inputData,
+                              inputData,
+                              divData,
+                              expectedResult,
+                              delegateOptions);
+
+    // Check that the debug callback function was called.
+    CHECK(callback);
+}
+
+TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
+    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+    std::vector<uint8_t> inputData = { 1, 2, 3, 4 };
+    std::vector<uint8_t> divData = { 2, 2, 3, 4 };
+    std::vector<uint8_t> expectedResult = { 1, 2, 2, 2 };
+
+    // The fourth argument enables memory import
+    armnn::OptimizerOptions optimizerOptions(false, false, false, true);
+    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+
+    DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
+                                backends,
+                                tensorShape,
+                                inputData,
+                                inputData,
+                                divData,
+                                expectedResult,
+                                delegateOptions);
+}
+
+TEST_CASE ("ArmnnDelegateStringParsingOptionDisableTfLiteRuntimeFallback")
+{
+    std::stringstream stringStream;
+    std::vector<std::string> keys   {  "backends", "debug-data", "disable-tflite-runtime-fallback"};
+    std::vector<std::string> values {    "CpuRef",          "1",                               "1"};
+
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+    std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
+    std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };
+
+    // Create the options_keys and options_values char arrays. The array form of
+    // std::unique_ptr is used so that the matching delete[] runs on destruction.
+    size_t num_options = keys.size();
+    std::unique_ptr<const char*[]> options_keys(new const char*[num_options + 1]);
+    std::unique_ptr<const char*[]> options_values(new const char*[num_options + 1]);
+    for (size_t i = 0; i < num_options; ++i)
+    {
+        options_keys[i]   = keys[i].c_str();
+        options_values[i] = values[i].c_str();
+    }
+
+    StreamRedirector redirect(std::cout, stringStream.rdbuf());
+
+    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
+    DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
+                                        backends,
+                                        tensorShape,
+                                        inputData,
+                                        expectedResult,
+                                        delegateOptions);
+    CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
+                                                                                                 != std::string::npos);
+}
+
+TEST_CASE ("ArmnnDelegateStringParsingOptionEnableTfLiteRuntimeFallback")
+{
+    std::stringstream stringStream;
+    std::vector<std::string> keys   {  "backends", "debug-data", "disable-tflite-runtime-fallback"};
+    std::vector<std::string> values {    "CpuRef",          "1",                               "0"};
+
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+    std::vector<float> inputData = { 0.1f, -2.1f, 3.0f, -4.6f };
+    std::vector<float> expectedResult = { 1.0f, -2.0f, 3.0f, -4.0f };
+
+    // Create the options_keys and options_values char arrays (array-form unique_ptr so
+    // delete[] is used)
+    size_t num_options = keys.size();
+    std::unique_ptr<const char*[]> options_keys(new const char*[num_options + 1]);
+    std::unique_ptr<const char*[]> options_values(new const char*[num_options + 1]);
+    for (size_t i = 0; i < num_options; ++i)
+    {
+        options_keys[i]   = keys[i].c_str();
+        options_values[i] = values[i].c_str();
+    }
+
+    StreamRedirector redirect(std::cout, stringStream.rdbuf());
+
+    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
+    DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
+                                        backends,
+                                        tensorShape,
+                                        inputData,
+                                        expectedResult,
+                                        delegateOptions);
+
+    CHECK(stringStream.str().find("TfLiteArmnnDelegate: There are unsupported operators in the model")
+                                                                                                 == std::string::npos);
+}
+
+}
+
+TEST_SUITE("DelegateOptions_CpuAccTests")
+{
+
+TEST_CASE ("ArmnnDelegateModelOptions_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+    std::vector<float> inputData = { 1, 2, 3, 4 };
+    std::vector<float> divData = { 2, 2, 3, 4 };
+    std::vector<float> expectedResult = { 1, 2, 2, 2 };
+
+    unsigned int numberOfThreads = 2;
+
+    armnn::ModelOptions modelOptions;
+    armnn::BackendOptions cpuAcc("CpuAcc",
+                                 {
+                                         { "FastMathEnabled", true },
+                                         { "NumberOfThreads", numberOfThreads }
+                                 });
+    modelOptions.push_back(cpuAcc);
+
+    armnn::OptimizerOptions optimizerOptions(false, false, false, false, modelOptions, false);
+    armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+
+    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
+                              backends,
+                              tensorShape,
+                              inputData,
+                              inputData,
+                              divData,
+                              expectedResult,
+                              delegateOptions);
+}
+
+TEST_CASE ("ArmnnDelegateSerializeToDot")
+{
+    const fs::path filename(fs::temp_directory_path() / "ArmnnDelegateSerializeToDot.dot");
+    if ( fs::exists(filename) )
+    {
+        fs::remove(filename);
+    }
+    std::stringstream ss;
+    {
+        StreamRedirector redirect(std::cout, ss.rdbuf());
+
+        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+        std::vector<float> inputData = { 1, 2, 3, 4 };
+        std::vector<float> divData = { 2, 2, 3, 4 };
+        std::vector<float> expectedResult = { 1, 2, 2, 2 };
+
+        armnn::OptimizerOptions optimizerOptions(false, false, false, false);
+        armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
+        // Enable serialize to dot by specifying the target file name.
+        delegateOptions.SetSerializeToDot(filename);
+        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
+                                  backends,
+                                  tensorShape,
+                                  inputData,
+                                  inputData,
+                                  divData,
+                                  expectedResult,
+                                  delegateOptions);
+    }
+    CHECK(fs::exists(filename));
+    // The file should have a size greater than 0 bytes.
+    CHECK(fs::file_size(filename) > 0);
+    // Clean up.
+    fs::remove(filename);
+}
+
+void CreateFp16StringParsingTestRun(std::vector<std::string>& keys,
+                                    std::vector<std::string>& values,
+                                    std::stringstream& ss)
+{
+    StreamRedirector redirect(std::cout, ss.rdbuf());
+
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
+    std::vector<float> inputData = { 1, 2, 3, 4 };
+    std::vector<float> divData = { 2, 2, 3, 4 };
+    std::vector<float> expectedResult = { 1, 2, 2, 2 };
+
+    // Create the options_keys and options_values char arrays (array-form unique_ptr so
+    // delete[] is used)
+    size_t num_options = keys.size();
+    std::unique_ptr<const char*[]> options_keys(new const char*[num_options + 1]);
+    std::unique_ptr<const char*[]> options_values(new const char*[num_options + 1]);
+    for (size_t i = 0; i < num_options; ++i)
+    {
+        options_keys[i]   = keys[i].c_str();
+        options_values[i] = values[i].c_str();
+    }
+
+    armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
+    DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
+                              backends,
+                              tensorShape,
+                              inputData,
+                              inputData,
+                              divData,
+                              expectedResult,
+                              delegateOptions);
+}
+
+TEST_CASE ("ArmnnDelegateStringParsingOptionReduceFp32ToFp16")
+{
+    SUBCASE("Fp16=1")
+    {
+        std::stringstream ss;
+        std::vector<std::string> keys   {  "backends", "debug-data", "reduce-fp32-to-fp16", "logging-severity"};
+        std::vector<std::string> values {    "CpuRef",          "1",                   "1",             "info"};
+        CreateFp16StringParsingTestRun(keys, values, ss);
+        CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
+        CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
+    }
+    SUBCASE("Fp16=true")
+    {
+        std::stringstream ss;
+        std::vector<std::string> keys   {  "backends", "debug-data", "reduce-fp32-to-fp16"};
+        std::vector<std::string> values {    "CpuRef",       "TRUE",                "true"};
+        CreateFp16StringParsingTestRun(keys, values, ss);
+        CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
+        CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
+    }
+    SUBCASE("Fp16=True")
+    {
+        std::stringstream ss;
+        std::vector<std::string> keys   {  "backends", "debug-data", "reduce-fp32-to-fp16"};
+        std::vector<std::string> values {    "CpuRef",       "true",                "True"};
+        CreateFp16StringParsingTestRun(keys, values, ss);
+        CHECK(ss.str().find("convert_fp32_to_fp16") != std::string::npos);
+        CHECK(ss.str().find("convert_fp16_to_fp32") != std::string::npos);
+    }
+    SUBCASE("Fp16=0")
+    {
+        std::stringstream ss;
+        std::vector<std::string> keys   {  "backends", "debug-data", "reduce-fp32-to-fp16"};
+        std::vector<std::string> values {    "CpuRef",       "true",                   "0"};
+        CreateFp16StringParsingTestRun(keys, values, ss);
+        CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
+        CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
+    }
+    SUBCASE("Fp16=false")
+    {
+        std::stringstream ss;
+        std::vector<std::string> keys   {  "backends", "debug-data", "reduce-fp32-to-fp16"};
+        std::vector<std::string> values {    "CpuRef",     "1",               "false"};
+        CreateFp16StringParsingTestRun(keys, values, ss);
+        CHECK(ss.str().find("convert_fp32_to_fp16") == std::string::npos);
+        CHECK(ss.str().find("convert_fp16_to_fp32") == std::string::npos);
+    }
+}
+
+}
+
+} // namespace armnnDelegate
diff --git a/delegate/test/DelegateOptionsTestHelper.hpp b/delegate/test/DelegateOptionsTestHelper.hpp
new file mode 100644
index 0000000..fb5403c
--- /dev/null
+++ b/delegate/test/DelegateOptionsTestHelper.hpp
@@ -0,0 +1,343 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include "TestUtils.hpp"
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
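+// RAII helper that swaps a stream's buffer (e.g. redirecting std::cout into a
+// std::stringstream so tests can inspect logged output) and restores it on destruction.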
+struct StreamRedirector
+{
+public:
+    StreamRedirector(std::ostream &stream, std::streambuf *newStreamBuffer)
+        : m_Stream(stream), m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer)) {}
+
+    ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); }
+
+private:
+    std::ostream &m_Stream;
+    std::streambuf *m_BackupBuffer;
+};
+
+std::vector<char> CreateAddDivTfLiteModel(tflite::TensorType tensorType,
+                                          const std::vector<int32_t>& tensorShape,
+                                          float quantScale = 1.0f,
+                                          int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+
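+    // Five tensors: three subgraph inputs, the intermediate "add" result (tensor 3) and the
+    // final output (tensor 4), all sharing the same shape and quantization.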
+    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input_0"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("input_1"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              3,
+                              flatBufferBuilder.CreateString("input_2"),
+                              quantizationParameters);
+    tensors[3] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              4,
+                              flatBufferBuilder.CreateString("add"),
+                              quantizationParameters);
+    tensors[4] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              5,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // create operator
+    tflite::BuiltinOptions addBuiltinOptionsType = tflite::BuiltinOptions_AddOptions;
+    flatbuffers::Offset<void> addBuiltinOptions =
+        CreateAddOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();
+
+    tflite::BuiltinOptions divBuiltinOptionsType = tflite::BuiltinOptions_DivOptions;
+    flatbuffers::Offset<void> divBuiltinOptions =
+        CreateDivOptions(flatBufferBuilder, ActivationFunctionType_NONE).Union();
+
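+    // Operator 0 (ADD) reads tensors 0 and 1 and writes the intermediate tensor 3;
+    // operator 1 (DIV) reads tensors 3 and 2 and writes tensor 4. The second argument of
+    // CreateOperator is the index into the operator-codes array created below.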
+    std::array<flatbuffers::Offset<Operator>, 2> operators;
+    const std::vector<int32_t> addInputs{0, 1};
+    const std::vector<int32_t> addOutputs{3};
+    operators[0] = CreateOperator(flatBufferBuilder,
+                                  0,
+                                  flatBufferBuilder.CreateVector<int32_t>(addInputs.data(), addInputs.size()),
+                                  flatBufferBuilder.CreateVector<int32_t>(addOutputs.data(), addOutputs.size()),
+                                  addBuiltinOptionsType,
+                                  addBuiltinOptions);
+    const std::vector<int32_t> divInputs{3, 2};
+    const std::vector<int32_t> divOutputs{4};
+    operators[1] = CreateOperator(flatBufferBuilder,
+                                  1,
+                                  flatBufferBuilder.CreateVector<int32_t>(divInputs.data(), divInputs.size()),
+                                  flatBufferBuilder.CreateVector<int32_t>(divOutputs.data(), divOutputs.size()),
+                                  divBuiltinOptionsType,
+                                  divBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{0, 1, 2};
+    const std::vector<int> subgraphOutputs{4};
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(operators.data(), operators.size()));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Add and Div Operator Model");
+
+    std::array<flatbuffers::Offset<OperatorCode>, 2> codes;
+    codes[0] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_ADD);
+    codes[1] = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_DIV);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(codes.data(), codes.size()),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+std::vector<char> CreateCeilTfLiteModel(tflite::TensorType tensorType,
+                                        const std::vector <int32_t>& tensorShape,
+                                        float quantScale = 1.0f,
+                                        int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({quantScale}),
+                                     flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    const std::vector<int32_t> operatorInputs({0});
+    const std::vector<int32_t> operatorOutputs({1});
+
+    flatbuffers::Offset<Operator> ceilOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       BuiltinOptions_NONE);
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: CEIL Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode =
+        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_CEIL);
+
+    const std::vector<int32_t> subgraphInputs({0});
+    const std::vector<int32_t> subgraphOutputs({1});
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&ceilOperator, 1));
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
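+// Builds the Add/Div model above and runs it on a reference TfLite interpreter and on an
+// ArmNN-delegated one created from the caller-supplied DelegateOptions, then compares both
+// outputs against expectedOutputValues.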
+template <typename T>
+void DelegateOptionTest(tflite::TensorType tensorType,
+                        const std::vector<armnn::BackendId>& backends,
+                        std::vector<int32_t>& tensorShape,
+                        std::vector<T>& input0Values,
+                        std::vector<T>& input1Values,
+                        std::vector<T>& input2Values,
+                        std::vector<T>& expectedOutputValues,
+                        const armnnDelegate::DelegateOptions& delegateOptions,
+                        float quantScale = 1.0f,
+                        int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateAddDivTfLiteModel(tensorType,
+                                                            tensorShape,
+                                                            quantScale,
+                                                            quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
+    armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
+    armnnDelegate::FillInput(tfLiteInterpreter, 2, input2Values);
+
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 2, input2Values);
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
+
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
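+// Same two-interpreter pattern, but uses the CEIL model, which the ArmNN delegate is
+// expected to reject; this exercises the behaviour controlled by the
+// "disable-tflite-runtime-fallback" option, so ModifyGraphWithDelegate may legitimately
+// throw and the exception message is forwarded to std::cout for the tests to inspect.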
+template <typename T>
+void DelegateOptionNoFallbackTest(tflite::TensorType tensorType,
+                                  const std::vector<armnn::BackendId>& backends,
+                                  std::vector<int32_t>& tensorShape,
+                                  std::vector<T>& inputValues,
+                                  std::vector<T>& expectedOutputValues,
+                                  const armnnDelegate::DelegateOptions& delegateOptions,
+                                  float quantScale = 1.0f,
+                                  int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateCeilTfLiteModel(tensorType,
+                                                          tensorShape,
+                                                          quantScale,
+                                                          quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    try
+    {
+        armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());
+    }
+    catch (const armnn::Exception& e)
+    {
+        // Forward the exception message to std::cout
+        std::cout << e.what() << std::endl;
+    }
+
+    // Set input data
+    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
+
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/DepthwiseConvolution2dTest.cpp b/delegate/test/DepthwiseConvolution2dTest.cpp
new file mode 100644
index 0000000..9ee589c
--- /dev/null
+++ b/delegate/test/DepthwiseConvolution2dTest.cpp
@@ -0,0 +1,282 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ConvolutionTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
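+// For DEPTHWISE_CONV_2D the filter is laid out as [1, filterH, filterW, channels * depth_multiplier]:
+// each input channel is convolved with depth_multiplier filters, so the first test below
+// (2 input channels, depth_multiplier 2) produces 4 output channels.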
+void DepthwiseConv2dValidReluFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 2, 2 };
+    std::vector<int32_t> filterShape { 1, 2, 2, 4 };
+    std::vector<int32_t> biasShape { 4 };
+    std::vector<int32_t> outputShape { 1, 3, 3, 1 };
+
+    static std::vector<float> inputValues =
+        {
+            1, 2,  7,  8,
+            3, 4,  9, 10,
+            5, 6, 11, 12
+        };
+
+    std::vector<float> filterValues =
+        {
+            1,    2,   3,   4,
+           -9,   10, -11,  12,
+            5,    6,   7,   8,
+            13,  -14,  15, -16
+        };
+
+    std::vector<float> biasValues = { 1, 2, 3, 4 };
+
+    std::vector<float> expectedOutputValues =
+        {
+            71, 0,  99, 0,
+            91, 0, 127, 0
+        };
+
+    tflite::Padding padding = tflite::Padding_VALID;
+    int32_t depth_multiplier = 2;
+
+    ConvolutionTest<float>(tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
+                           ::tflite::TensorType_FLOAT32,
+                           1, // strideX
+                           1, // strideY
+                           1, // dilationX
+                           1, // dilationY
+                           padding,
+                           tflite::ActivationFunctionType_RELU,
+                           backends,
+                           inputShape,
+                           filterShape,
+                           outputShape,
+                           inputValues,
+                           filterValues,
+                           expectedOutputValues,
+                           biasShape,
+                           biasValues,
+                           {1.0f}, // biasScale
+                           {0},    // biasOffset
+                           {1.0f}, // filterScale
+                           {0},    // filterOffsets
+                           2.0f,   // outputQuantScale
+                           0,      // outputQuantOffset
+                           1.0f,   // quantScale
+                           0,      // quantOffset
+                           depth_multiplier);
+}
+
+void DepthwiseConv2dSameUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 3, 1 };
+    std::vector<int32_t> filterShape { 1, 3, 3, 1 };
+    std::vector<int32_t> biasShape { 1 } ;
+    std::vector<int32_t> outputShape { 1, 3, 3, 1 };
+
+    static std::vector<uint8_t> inputValues =
+        {
+            0, 1, 2,
+            3, 4, 5,
+            6, 7, 8
+        };
+
+    std::vector<uint8_t> filterValues = { 9, 8, 7,  6, 5, 4,  3, 2, 1 };
+
+    std::vector<int32_t> biasValues = { 10 };
+
+    std::vector<uint8_t> expectedOutputValues =
+        {
+            12,  23, 24, // ( 14+10)/2, ( 35+10)/2, ( 38+10)/2,
+            34,  65, 61, // ( 57+10)/2, (120+10)/2, (111+10)/2,
+            60, 104, 84  // (110+10)/2, (197+10)/2, (158+10)/2
+        };
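+    // Each accumulator (convolution sum plus the bias of 10) is requantized by
+    // dividing by the output scale of 2 (presumably the helper's default here)
+    // and rounding to the nearest uint8 value, as the comments above show.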
+
+    tflite::Padding padding = tflite::Padding_SAME;
+
+    ConvolutionTest<uint8_t, int32_t>(tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
+                                      ::tflite::TensorType_UINT8,
+                                      1, // strideX
+                                      1, // strideY
+                                      1, // dilationX
+                                      1, // dilationY
+                                      padding,
+                                      tflite::ActivationFunctionType_NONE,
+                                      backends,
+                                      inputShape,
+                                      filterShape,
+                                      outputShape,
+                                      inputValues,
+                                      filterValues,
+                                      expectedOutputValues,
+                                      biasShape,
+                                      biasValues);
+}
+
+void DepthwiseConv2dSameInt8PerChannelTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 4, 4, 4 };
+    std::vector<int32_t> filterShape { 1, 2, 2, 16 };
+    std::vector<int32_t> biasShape {16} ;
+    std::vector<int32_t> outputShape { 1, 4, 4, 16 };
+
+    static std::vector<int8_t> inputValues =
+        {
+            3,3,3,4, 4,4,0,0, 0,3,4,3, 0,2,2,3,
+            3,0,3,0, 0,3,2,1, 4,1,2,2, 0,0,0,4,
+            3,2,2,2, 2,1,0,4, 4,3,2,4, 3,2,0,0,
+            4,1,4,4, 1,0,4,3, 3,2,0,3, 1,1,0,2
+        };
+
+    std::vector<int8_t> filterValues = { 12,20,10, 3, 2,24, 9,10, 5,16,30,12, 3,10, 4,32,
+                                           8, 0,30, 3, 0,16,12,15,20,12, 0, 3, 9,20, 8, 8,
+                                          12,15,20, 0, 0, 0, 3,15,15, 8,40,12, 9, 5, 2,24,
+                                           4, 0, 0, 6, 6, 0, 3, 5,20, 8,20, 3, 6,15, 4, 0 };
+    std::vector<float> filterScales = {         0.25,   0.2,        0.1, 0.3333333333,
+                                                 0.5, 0.125, 0.33333333,          0.2,
+                                                 0.2,  0.25,        0.1,  0.333333333,
+                                        0.3333333333,   0.2,        0.5,        0.125 };
+
+    int32_t filterQuantizationDim = 3;
+
+    int32_t depth_multiplier = 4;
+
+    std::vector<int32_t> biasValues = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+    float inputScale = 1.0f;
+    std::vector<float> biasScales {};
+    std::vector<int64_t> biasOffsets {};
+    std::vector<int64_t> filterOffsets {};
+    for (const auto& filterScale: filterScales)
+    {
+        biasScales.push_back(inputScale * filterScale);
+        // Filter and bias offsets always need to be zero for per-channel quantization; we don't support anything else.
+        biasOffsets.push_back(0);
+        filterOffsets.push_back(0);
+    }
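+    // TfLite's per-channel quantization convention requires biasScale to equal
+    // inputScale * filterScale for each output channel; the loop above builds
+    // those scales alongside the mandatory zero offsets.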
+
+    std::vector<int8_t> expectedOutputValues =
+        {
+            26,21,21, 7,12,17,28,21,20,22,25,26, 6,11,10,16,
+            16,16, 4,12, 7,18,28,27,30,20,12,14,16,19,17, 6,
+            12,12, 8, 0, 3,13,18,15,18,26,20,26,26,32,28,21,
+            0, 0, 0, 0, 2, 6, 6, 4, 2, 8, 6, 8,15,10,10,24,
+            20,21, 9, 7, 3, 6,15,16,17,22,17,22,17,18,14, 7,
+            18, 6,16,12,12,11,17,15,18,18,10,12,27,26,22,18,
+            27,28,12,10, 7, 3, 8,13, 8,12,14,16,26,24,24,24,
+            9, 9, 6, 0, 0, 0, 2, 6, 0, 0, 0, 0, 4, 8, 8,16,
+            26,24,17, 7, 2, 8,11,10,30,24,30,28,32,33,30,24,
+            20,11,16,12, 7, 9,17,13,20,14,16,18,31,36,33,29,
+            28,25,19, 9, 6,13,20,19, 2, 8, 6, 8,17,17,15,25,
+            12,15, 5, 3, 2, 6, 7, 7, 0, 0, 0, 0, 6, 2, 2, 6,
+            14,16, 7, 5, 1, 3, 3, 2,20,28,12,20,13,20,20,19,
+            9, 4,10, 4, 0, 4, 8, 6, 4,16,12,16,12,18,18,15,
+            11,12, 6, 4, 2, 8,10, 7, 0, 0, 0, 0, 9,14,14,14,
+            3, 4, 1, 1, 1, 3, 3, 2, 0, 0, 0, 0, 2, 4, 4, 8
+        };
+
+    tflite::Padding padding = tflite::Padding_SAME;
+
+    ConvolutionTest<int8_t, int32_t>(tflite::BuiltinOperator_DEPTHWISE_CONV_2D,
+                                     ::tflite::TensorType_INT8,
+                                     1, // strideX
+                                     1, // strideY
+                                     1, // dilationX
+                                     1, // dilationY
+                                     padding,
+                                     tflite::ActivationFunctionType_NONE,
+                                     backends,
+                                     inputShape,
+                                     filterShape,
+                                     outputShape,
+                                     inputValues,
+                                     filterValues,
+                                     expectedOutputValues,
+                                     biasShape,
+                                     biasValues,
+                                     biasScales,
+                                     biasOffsets,
+                                     filterScales,
+                                     filterOffsets,
+                                     1.0f,       // outputQuantScale
+                                     0,          // outputQuantOffset
+                                     inputScale, // quantScale
+                                     0,          // quantOffset
+                                     depth_multiplier,
+                                     filterQuantizationDim);
+}
+
+TEST_SUITE("DepthwiseConv2d_CpuRef_Tests")
+{
+
+TEST_CASE ("DepthwiseConv2d_Valid_Relu_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    DepthwiseConv2dValidReluFp32Test(backends);
+}
+
+TEST_CASE ("DepthwiseConv2d_Same_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    DepthwiseConv2dSameUint8Test(backends);
+}
+
+TEST_CASE ("DepthwiseConv2d_Same_Int8_PerChannelQuantization_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    DepthwiseConv2dSameInt8PerChannelTest(backends);
+}
+
+} // End of TEST_SUITE("DepthwiseConv2d_CpuRef_Tests")
+
+TEST_SUITE("DepthwiseConv2d_CpuAcc_Tests")
+{
+
+TEST_CASE ("DepthwiseConv2d_Valid_Relu_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    DepthwiseConv2dValidReluFp32Test(backends);
+}
+
+TEST_CASE ("DepthwiseConv2d_Same_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    DepthwiseConv2dSameUint8Test(backends);
+}
+
+} // End of TEST_SUITE("DepthwiseConv2d_CpuAcc_Tests")
+
+TEST_SUITE("DepthwiseConv2d_GpuAcc_Tests")
+{
+
+TEST_CASE ("DepthwiseConv2d_Valid_Relu_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    DepthwiseConv2dValidReluFp32Test(backends);
+}
+
+TEST_CASE ("DepthwiseConv2d_Same_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    DepthwiseConv2dSameUint8Test(backends);
+}
+
+} // End of TEST_SUITE("DepthwiseConv2d_GpuAcc_Tests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ElementwiseBinaryTest.cpp b/delegate/test/ElementwiseBinaryTest.cpp
new file mode 100644
index 0000000..effed03
--- /dev/null
+++ b/delegate/test/ElementwiseBinaryTest.cpp
@@ -0,0 +1,1136 @@
+//
+// Copyright © 2020-2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseBinaryTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void AddFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 3 };
+
+    std::vector<float> input0Values =
+    {
+        0.0f, 2.0f, 1.0f,
+        0.2f, 1.0f, 2.0f,
+
+        1.0f, 2.0f, 1.0f,
+        0.2f, 1.0f, 2.0f,
+
+        0.0f, 2.0f, 1.0f,
+        4.2f, 1.0f, 2.0f,
+
+        0.0f, 0.0f, 1.0f,
+        0.2f, 1.0f, 2.0f,
+    };
+
+    std::vector<float> input1Values =
+    {
+        1.0f, 2.0f,  1.0f,
+        0.0f, 1.0f,  2.0f,
+
+        1.0f, 2.0f, -2.0f,
+        0.2f, 1.0f,  2.0f,
+
+        0.0f, 2.0f,  1.0f,
+        4.2f, 0.0f, -3.0f,
+
+        0.0f, 0.0f,  1.0f,
+        0.7f, 1.0f,  5.0f,
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        1.0f, 4.0f,  2.0f,
+        0.2f, 2.0f,  4.0f,
+
+        2.0f, 4.0f, -1.0f,
+        0.4f, 2.0f,  4.0f,
+
+        0.0f, 4.0f,  2.0f,
+        8.4f, 1.0f, -1.0f,
+
+        0.0f, 0.0f,  2.0f,
+        0.9f, 2.0f,  7.0f,
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void AddBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 3, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 1, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 3, 2, 3 };
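+    // The mismatched dimensions broadcast against each other: {1,3,2,1} and
+    // {1,1,2,3} expand to the common output shape {1,3,2,3}.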
+
+    std::vector<float> input0Values
+    {
+        0.0f,
+        1.0f,
+
+        2.0f,
+        3.0f,
+
+        4.0f,
+        5.0f,
+    };
+    std::vector<float> input1Values
+    {
+        0.5f, 1.5f, 2.5f,
+        3.5f, 4.5f, 5.5f,
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        0.5f, 1.5f, 2.5f,
+        4.5f, 5.5f, 6.5f,
+
+        2.5f, 3.5f, 4.5f,
+        6.5f, 7.5f, 8.5f,
+
+        4.5f, 5.5f, 6.5f,
+        8.5f, 9.5f, 10.5f,
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void AddConstInputTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 3, 2, 1 };
+    std::vector<int32_t> input1Shape { 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 3, 2, 1 };
+
+    std::vector<float> input0Values
+        {
+            0.0f,
+            1.0f,
+
+            2.0f,
+            3.0f,
+
+            4.0f,
+            5.0f,
+        };
+    std::vector<float> input1Values
+        {
+            0.5f
+        };
+    // Set output data
+    std::vector<float> expectedOutputValues
+        {
+            0.5f,
+            1.5f,
+
+            2.5f,
+            3.5f,
+
+            4.5f,
+            5.5f,
+        };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues,
+                                 1.0f, // quantScale
+                                 0,    // quantOffset
+                                 true); // constantInput
+}
+
+void AddActivationTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> input0Values { 4.0f, 0.8f, 0.7f, -0.8f };
+    std::vector<float> input1Values { 0.7f, -1.2f, 0.8f, 0.5f };
+    std::vector<float> expectedOutputValues { 4.7f, 0.0f, 1.5f, 0.0f };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
+                                 tflite::ActivationFunctionType_RELU,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void AddUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        63,  35,  77,  70,  56, 112,
+        203,  28, 252, 168, 245,  91
+    };
+
+    std::vector<uint8_t> input1Values =
+    {
+        21,   7, 175, 231, 175, 210,
+        126, 161,  63,  21, 105, 126
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        81,  39, 249, 255, 228, 255,
+        255, 186, 255, 186, 255, 214,
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_ADD,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 7.0f, 3);
+}
+
+void DivFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        2.f, 2.f, 2.f, 2.f, 3.f, 3.f, 3.f, 3.f,
+        4.f, 4.f, 4.f, 4.f, 5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> input1Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f,
+        4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        2.f, 2.f, 2.f, 2.f, 1.50f, 1.50f, 1.50f, 1.50f,
+        1.f, 1.f, 1.f, 1.f, 1.25f, 1.25f, 1.25f, 1.25f
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_DIV,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void DivBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
+
+    std::vector<float> input0Values = { 2, 4, 6, 8, 10, 12, 14, 16 };
+    std::vector<float> input1Values = { 2 };
+    std::vector<float> expectedOutputValues = { 1, 2, 3, 4, 5, 6, 7, 8 };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_DIV,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void DivUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        2, 2, 2, 2,  3, 3, 3, 3,
+        4, 4, 4, 4,  5, 5, 5, 5
+    };
+
+    std::vector<uint8_t> input1Values =
+    {
+        1, 1, 1, 1,  2, 2, 2, 2,
+        4, 4, 4, 4,  4, 4, 4, 4
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        8, 8, 8, 8,  6, 6, 6, 6,
+        4, 4, 4, 4,  5, 5, 5, 5
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_DIV,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 0.25f, 0);
+}
+
+void FloorDivFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        -37.5f, -15.2f, -8.76f, -2.0f,  -2.6f, -1.0f,  -0.8f,   0.0f,
+          4.0f,   1.6f,  2.0f,   5.2f,   6.0f, 35.04f, 60.8f, 150.0f
+    };
+
+    std::vector<float> input1Values =
+    {
+        1.f, 1.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f,
+        4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        -38.0f, -16.0f, -9.0f,  -2.0f, -2.0f, -1.0f,  -1.0f,  0.0f,
+          1.0f,   0.0f,  0.0f,   1.0f,  1.0f,  8.0f,  15.0f, 37.0f
+    };
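+    // FLOOR_DIV rounds towards negative infinity, so e.g. -2.6f / 2.f = -1.3f
+    // floors to -2.0f rather than truncating to -1.0f.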
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_FLOOR_DIV,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MaxFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 5.f, 1.f,  2.f, 2.f, 7.f, 2.f,
+        3.f, 3.f, 3.f, 3.f,  4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> input1Values =
+    {
+        2.f, 2.f, 2.f, 2.f,  3.f, 3.f, 3.f, 3.f,
+        4.f, 4.f, 4.f, 4.f,  5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        2.f, 2.f, 5.f, 2.f,  3.f, 3.f, 7.f, 3.f,
+        4.f, 4.f, 4.f, 4.f,  5.f, 5.f, 5.f, 5.f
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MAXIMUM,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MaxBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
+
+    std::vector<float> input0Values = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+    std::vector<float> input1Values = { 4.f };
+    std::vector<float> expectedOutputValues = { 4.f, 4.f, 4.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MAXIMUM,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MaxUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        1, 1, 1, 1, 7, 8, 9, 9,
+        3, 3, 3, 3, 4, 4, 4, 4
+    };
+
+    std::vector<uint8_t> input1Values =
+    {
+        2, 2, 2, 2, 3, 3, 3, 3,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        2, 2, 2, 2, 7, 8, 9, 9,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MAXIMUM,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 1.0f, 0);
+}
+
+void MinFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 5.f, 1.f,  2.f, 2.f, 7.f, 2.f,
+        3.f, 3.f, 3.f, 3.f,  4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> input1Values =
+    {
+        2.f, 2.f, 2.f, 2.f,  3.f, 3.f, 3.f, 3.f,
+        1.f, 1.f, 1.f, 1.f,  5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        1.f, 1.f, 2.f, 1.f,  2.f, 2.f, 3.f, 2.f,
+        1.f, 1.f, 1.f, 1.f,  4.f, 4.f, 4.f, 4.f
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MINIMUM,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MinBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
+
+    std::vector<float> input0Values = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
+
+    std::vector<float> input1Values = { 4.f };
+
+    std::vector<float> expectedOutputValues = { 1.f, 2.f, 3.f, 4.f, 4.f, 4.f, 4.f, 4.f };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MINIMUM,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MinUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        1, 1, 1, 1, 7, 8, 9, 9,
+        3, 3, 3, 3, 4, 4, 4, 4
+    };
+
+    std::vector<uint8_t> input1Values =
+    {
+        2, 2, 2, 2, 3, 3, 3, 3,
+        4, 4, 4, 4, 5, 5, 5, 5
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        1, 1, 1, 1, 3, 3, 3, 3,
+        3, 3, 3, 3, 4, 4, 4, 4
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MINIMUM,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 1.0f, 0);
+}
+
+void MulFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 2, 2, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 2, 2, 2, 2 };
+
+    std::vector<float> input0Values =
+    {
+        1.f, 1.f, 1.f, 1.f,  2.f, 2.f, 2.f, 2.f,
+        3.f, 3.f, 3.f, 3.f,  4.f, 4.f, 4.f, 4.f
+    };
+
+    std::vector<float> input1Values =
+    {
+        2.f, 2.f, 2.f, 2.f,  3.f, 3.f, 3.f, 3.f,
+        4.f, 4.f, 4.f, 4.f,  5.f, 5.f, 5.f, 5.f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        2.f,  2.f,  2.f,  2.f,   6.f,  6.f,  6.f,  6.f,
+        12.f, 12.f, 12.f, 12.f,  20.f, 20.f, 20.f, 20.f
+    };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MulBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 2 };
+
+    std::vector<float> input0Values = { 2, 4, 6, 8, 10, 12, 14, 16 };
+    std::vector<float> input1Values = { 2 };
+    std::vector<float> expectedOutputValues = { 4, 8, 12, 16, 20, 24, 28, 32 };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void MulUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 3 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 3 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 3 };
+
+    std::vector<uint8_t> input0Values =
+    {
+        1, 2, 3,    4,  5,  6,
+        7, 8, 9,   10, 11, 12
+    };
+
+    std::vector<uint8_t> input1Values = { 1, 2, 3 };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        1,  4,   9,     4, 10, 18,
+        7, 16,  27,    10, 22, 36
+    };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_MUL,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 1.0f, 0);
+}
+
+void MulActivationTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2, 1 };
+
+    std::vector<float> input0Values { 4.0f, 0.0f, 1.0f, 0.5f };
+    std::vector<float> input1Values { -2.0f, -1.2f, 2.5f, 2.0f };
+    std::vector<float> expectedOutputValues { 0.0f, 0.0f, 2.5f, 1.0f };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_MUL,
+                                 tflite::ActivationFunctionType_RELU,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void SubFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 2, 2 };
+
+    std::vector<float> input0Values = { 1, 3, 3, -7 };
+    std::vector<float> input1Values = { 1, -1, 0, -2 };
+    std::vector<float> expectedOutputValues = { 0, 4, 3, -5 };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_SUB,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void SubBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 2, 2 };
+
+    std::vector<float> input0Values = { 2, 3, 4, 5};
+    std::vector<float> input1Values = { 10 };
+    std::vector<float> expectedOutputValues = { -8, -7, -6, -5 };
+
+    ElementwiseBinaryTest<float>(tflite::BuiltinOperator_SUB,
+                                 tflite::ActivationFunctionType_NONE,
+                                 ::tflite::TensorType_FLOAT32,
+                                 backends,
+                                 input0Shape,
+                                 input1Shape,
+                                 expectedOutputShape,
+                                 input0Values,
+                                 input1Values,
+                                 expectedOutputValues);
+}
+
+void SubUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 2, 2 };
+
+    std::vector<uint8_t> input0Values = { 10, 12, 14, 16 };
+    std::vector<uint8_t> input1Values = { 2 };
+    std::vector<uint8_t> expectedOutputValues = { 8, 10, 12, 14 };
+
+    ElementwiseBinaryTest<uint8_t>(tflite::BuiltinOperator_SUB,
+                                   tflite::ActivationFunctionType_NONE,
+                                   ::tflite::TensorType_UINT8,
+                                   backends,
+                                   input0Shape,
+                                   input1Shape,
+                                   expectedOutputShape,
+                                   input0Values,
+                                   input1Values,
+                                   expectedOutputValues, 1.0f, 0);
+}
+
+TEST_SUITE("ElementwiseBinary_GpuAccTests")
+{
+
+TEST_CASE ("ADD_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AddFP32Test(backends);
+}
+
+TEST_CASE ("ADD_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AddBroadcastTest(backends);
+}
+
+TEST_CASE ("ADD_Activation_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AddActivationTest(backends);
+}
+
+TEST_CASE ("ADD_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AddUint8Test(backends);
+}
+
+TEST_CASE ("DIV_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    DivFP32Test(backends);
+}
+
+TEST_CASE ("DIV_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    DivBroadcastTest(backends);
+}
+
+TEST_CASE ("FLOORDIV_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    FloorDivFP32Test(backends);
+}
+
+TEST_CASE ("MAX_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxFP32Test(backends);
+}
+
+TEST_CASE ("MAX_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxBroadcastTest(backends);
+}
+
+TEST_CASE ("MAX_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxUint8Test(backends);
+}
+
+TEST_CASE ("MIN_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MinFP32Test(backends);
+}
+
+TEST_CASE ("MIN_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MinBroadcastTest(backends);
+}
+
+TEST_CASE ("MIN_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MinUint8Test(backends);
+}
+
+TEST_CASE ("MUL_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MulFP32Test(backends);
+}
+
+TEST_CASE ("MUL_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MulBroadcastTest(backends);
+}
+
+TEST_CASE ("MUL_Activation_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MulActivationTest(backends);
+}
+
+TEST_CASE ("MUL_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MulUint8Test(backends);
+}
+
+TEST_CASE ("SUB_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    SubFP32Test(backends);
+}
+
+TEST_CASE ("SUB_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    SubBroadcastTest(backends);
+}
+
+TEST_CASE ("SUB_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    SubUint8Test(backends);
+}
+
+} // TEST_SUITE("ElementwiseBinary_GpuAccTests")
+
+TEST_SUITE("ElementwiseBinary_CpuAccTests")
+{
+
+TEST_CASE ("ADD_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AddFP32Test(backends);
+}
+
+TEST_CASE ("ADD_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AddBroadcastTest(backends);
+}
+
+TEST_CASE ("ADD_Activation_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AddActivationTest(backends);
+}
+
+TEST_CASE ("ADD_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AddUint8Test(backends);
+}
+
+TEST_CASE ("DIV_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    DivFP32Test(backends);
+}
+
+TEST_CASE ("DIV_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    DivBroadcastTest(backends);
+}
+
+TEST_CASE ("FLOORDIV_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    FloorDivFP32Test(backends);
+}
+
+TEST_CASE ("MAX_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxFP32Test(backends);
+}
+
+TEST_CASE ("MAX_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxBroadcastTest(backends);
+}
+
+TEST_CASE ("MAX_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxUint8Test(backends);
+}
+
+TEST_CASE ("MIN_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MinFP32Test(backends);
+}
+
+TEST_CASE ("MIN_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MinBroadcastTest(backends);
+}
+
+TEST_CASE ("MIN_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MinUint8Test(backends);
+}
+
+TEST_CASE ("MUL_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MulFP32Test(backends);
+}
+
+TEST_CASE ("MUL_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MulBroadcastTest(backends);
+}
+
+TEST_CASE ("MUL_Actiation_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MulActivationTest(backends);
+}
+
+TEST_CASE ("MUL_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MulUint8Test(backends);
+}
+
+TEST_CASE ("SUB_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    SubFP32Test(backends);
+}
+
+TEST_CASE ("SUB_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    SubBroadcastTest(backends);
+}
+
+TEST_CASE ("SUB_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    SubUint8Test(backends);
+}
+
+} // TEST_SUITE("ElementwiseBinary_CpuAccTests")
+
+TEST_SUITE("ElementwiseBinary_CpuRefTests")
+{
+
+TEST_CASE ("ADD_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AddFP32Test(backends);
+}
+
+TEST_CASE ("ADD_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AddBroadcastTest(backends);
+}
+
+TEST_CASE ("ADD_Constant_Input_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AddConstInputTest(backends);
+}
+
+TEST_CASE ("ADD_Activation_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AddActivationTest(backends);
+}
+
+TEST_CASE ("ADD_UINT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AddUint8Test(backends);
+}
+
+TEST_CASE ("DIV_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DivFP32Test(backends);
+}
+
+TEST_CASE ("DIV_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DivBroadcastTest(backends);
+}
+
+TEST_CASE ("FLOORDIV_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    FloorDivFP32Test(backends);
+}
+
+TEST_CASE ("DIV_UINT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DivUint8Test(backends);
+}
+
+TEST_CASE ("MAX_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxFP32Test(backends);
+}
+
+TEST_CASE ("MAX_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxBroadcastTest(backends);
+}
+
+TEST_CASE ("MAX_UINT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxUint8Test(backends);
+}
+
+TEST_CASE ("MIN_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MinFP32Test(backends);
+}
+
+TEST_CASE ("MIN_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MinBroadcastTest(backends);
+}
+
+TEST_CASE ("MIN_UINT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MinUint8Test(backends);
+}
+
+TEST_CASE ("MUL_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MulFP32Test(backends);
+}
+
+TEST_CASE ("MUL_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MulBroadcastTest(backends);
+}
+
+TEST_CASE ("MUL_Actiation_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MulActivationTest(backends);
+}
+
+TEST_CASE ("MUL_UINT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MulUint8Test(backends);
+}
+
+TEST_CASE ("SUB_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    SubFP32Test(backends);
+}
+
+TEST_CASE ("SUB_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    SubBroadcastTest(backends);
+}
+
+TEST_CASE ("SUB_UINT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    SubUint8Test(backends);
+}
+
+} // TEST_SUITE("ElementwiseBinary_CpuRefTests")
+
+} // namespace armnnDelegate
diff --git a/delegate/test/ElementwiseBinaryTestHelper.hpp b/delegate/test/ElementwiseBinaryTestHelper.hpp
new file mode 100644
index 0000000..47ee7c2
--- /dev/null
+++ b/delegate/test/ElementwiseBinaryTestHelper.hpp
@@ -0,0 +1,243 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename T>
+std::vector<char> CreateElementwiseBinaryTfLiteModel(tflite::BuiltinOperator binaryOperatorCode,
+                                                     tflite::ActivationFunctionType activationType,
+                                                     tflite::TensorType tensorType,
+                                                     const std::vector <int32_t>& input0TensorShape,
+                                                     const std::vector <int32_t>& input1TensorShape,
+                                                     const std::vector <int32_t>& outputTensorShape,
+                                                     std::vector<T>& input1Values,
+                                                     bool constantInput = false,
+                                                     float quantScale = 1.0f,
+                                                     int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    if (constantInput)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(input1Values.data()),
+                                                        sizeof(T) * input1Values.size())));
+    }
+    else
+    {
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+    }
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
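+    // Buffer 0 is TfLite's reserved empty buffer; buffers 1-3 back input_0,
+    // input_1 and the output respectively. With constantInput set, input_1's
+    // data is embedded in the model itself, turning that tensor into a
+    // constant, so ElementwiseBinaryTest below skips FillInput for it.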
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
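+    // The same (scale, offset) pair is shared by both inputs and the output;
+    // TfLite maps a quantized value q back to the real value
+    // quantScale * (q - quantOffset).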
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
+                                                                      input0TensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input_0"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
+                                                                      input1TensorShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("input_1"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              3,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // Create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
+    flatbuffers::Offset<void> operatorBuiltinOptions = 0;
+    switch (binaryOperatorCode)
+    {
+        case BuiltinOperator_ADD:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_AddOptions;
+            operatorBuiltinOptions = CreateAddOptions(flatBufferBuilder, activationType).Union();
+            break;
+        }
+        case BuiltinOperator_DIV:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_DivOptions;
+            operatorBuiltinOptions = CreateDivOptions(flatBufferBuilder, activationType).Union();
+            break;
+        }
+        case BuiltinOperator_MAXIMUM:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_MaximumMinimumOptions;
+            operatorBuiltinOptions = CreateMaximumMinimumOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_MINIMUM:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_MaximumMinimumOptions;
+            operatorBuiltinOptions = CreateMaximumMinimumOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_MUL:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_MulOptions;
+            operatorBuiltinOptions = CreateMulOptions(flatBufferBuilder, activationType).Union();
+            break;
+        }
+        case BuiltinOperator_SUB:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_SubOptions;
+            operatorBuiltinOptions = CreateSubOptions(flatBufferBuilder, activationType).Union();
+            break;
+        }
+        case BuiltinOperator_FLOOR_DIV:
+        {
+            operatorBuiltinOptionsType = tflite::BuiltinOptions_FloorDivOptions;
+            // FloorDivOptions is an empty table in the TfLite schema, so no
+            // activation type can be encoded for this operator.
+            operatorBuiltinOptions = CreateFloorDivOptions(flatBufferBuilder).Union();
+            break;
+        }
+        default:
+            break;
+    }
+    const std::vector<int32_t> operatorInputs{0, 1};
+    const std::vector<int32_t> operatorOutputs{2};
+    flatbuffers::Offset <Operator> elementwiseBinaryOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{0, 1};
+    const std::vector<int> subgraphOutputs{2};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&elementwiseBinaryOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Elementwise Binary Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, binaryOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void ElementwiseBinaryTest(tflite::BuiltinOperator binaryOperatorCode,
+                           tflite::ActivationFunctionType activationType,
+                           tflite::TensorType tensorType,
+                           std::vector<armnn::BackendId>& backends,
+                           std::vector<int32_t>& input0Shape,
+                           std::vector<int32_t>& input1Shape,
+                           std::vector<int32_t>& outputShape,
+                           std::vector<T>& input0Values,
+                           std::vector<T>& input1Values,
+                           std::vector<T>& expectedOutputValues,
+                           float quantScale = 1.0f,
+                           int quantOffset  = 0,
+                           bool constantInput = false)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateElementwiseBinaryTfLiteModel<T>(binaryOperatorCode,
+                                                                          activationType,
+                                                                          tensorType,
+                                                                          input0Shape,
+                                                                          input1Shape,
+                                                                          outputShape,
+                                                                          input1Values,
+                                                                          constantInput,
+                                                                          quantScale,
+                                                                          quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
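+    // kTfLiteOk confirms the graph was rewritten; any node the ArmNN delegate
+    // does not claim simply stays on the built-in TfLite kernels.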
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);
+    if (!constantInput)
+    {
+        armnnDelegate::FillInput<T>(tfLiteInterpreter, 1, input1Values);
+        armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 1, input1Values);
+    }
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                        armnnDelegateInterpreter,
+                                        outputShape,
+                                        expectedOutputValues);
+    armnnDelegateInterpreter.reset(nullptr);
+}
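+
+// Illustrative only (hypothetical values, not a test in this change): an ADD test
+// on CpuRef could drive the helper like
+//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+//     std::vector<int32_t> shape { 2, 2 };
+//     std::vector<float> in0 { 1.f, 2.f, 3.f, 4.f };
+//     std::vector<float> in1 { 4.f, 3.f, 2.f, 1.f };
+//     std::vector<float> out { 5.f, 5.f, 5.f, 5.f };
+//     ElementwiseBinaryTest<float>(tflite::BuiltinOperator_ADD,
+//                                  tflite::ActivationFunctionType_NONE,
+//                                  tflite::TensorType_FLOAT32,
+//                                  backends, shape, shape, shape, in0, in1, out);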
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ElementwiseUnaryTest.cpp b/delegate/test/ElementwiseUnaryTest.cpp
new file mode 100644
index 0000000..6331436
--- /dev/null
+++ b/delegate/test/ElementwiseUnaryTest.cpp
@@ -0,0 +1,420 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseUnaryTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+TEST_SUITE("ElementwiseUnary_GpuAccTests")
+{
+
+TEST_CASE ("Abs_Float32_GpuAcc_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        -0.1f, -0.2f, -0.3f,
+        0.1f,  0.2f,  0.3f
+    };
+    // Calculate output data
+    std::vector<float> expectedOutputValues(inputValues.size());
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        expectedOutputValues[i] = std::abs(inputValues[i]);
+    }
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Exp_Float32_GpuAcc_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        5.0f, 4.0f,
+        3.0f, 2.0f,
+        1.0f, 1.1f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        148.413159102577f, 54.598150033144f,
+        20.085536923188f,  7.389056098931f,
+        2.718281828459f,  3.004166023946f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_EXP, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Log_Float32_GpuAcc_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.0f, 1.0f,  2.0f,
+        3.0f,  4.0f, 2.71828f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        0.f,  0.f,  0.69314718056f,
+        1.09861228867f, 1.38629436112f, 0.99999932734f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_LOG, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Neg_Float32_GpuAcc_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.f, 0.f, 3.f,
+        25.f, 64.f, 100.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        -1.f, 0.f, -3.f,
+        -25.f, -64.f, -100.f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_NEG, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Rsqrt_Float32_GpuAcc_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.f, 4.f, 16.f,
+        25.f, 64.f, 100.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        1.f, 0.5f, 0.25f,
+        0.2f, 0.125f, 0.1f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_RSQRT, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Sin_Float32_GpuAcc_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    // Set input data
+    std::vector<float> inputValues
+    {
+            0.0f, 1.0f, 16.0f,
+            0.5f, 36.0f, -1.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+            0.0f, 0.8414709848f, -0.28790331666f,
+            0.4794255386f, -0.99177885344f, -0.8414709848f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SIN, backends, inputValues, expectedOutputValues);
+}
+} // TEST_SUITE("ElementwiseUnary_GpuAccTests")
+
+TEST_SUITE("ElementwiseUnary_CpuAccTests")
+{
+
+TEST_CASE ("Abs_Float32_CpuAcc_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        -0.1f, -0.2f, -0.3f,
+        0.1f,  0.2f,  0.3f
+    };
+    // Calculate output data
+    std::vector<float> expectedOutputValues(inputValues.size());
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        expectedOutputValues[i] = std::abs(inputValues[i]);
+    }
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Exp_Float32_CpuAcc_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        5.0f, 4.0f,
+        3.0f, 2.0f,
+        1.0f, 1.1f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        148.413159102577f, 54.598150033144f,
+        20.085536923188f,  7.389056098931f,
+        2.718281828459f,  3.004166023946f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_EXP, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Log_Float32_CpuAcc_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.0f, 1.0f,  2.0f,
+        3.0f,  4.0f, 2.71828f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        0.f,  0.f,  0.69314718056f,
+        1.09861228867f, 1.38629436112f, 0.99999932734f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_LOG, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Neg_Float32_CpuAcc_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.f, 0.f, 3.f,
+        25.f, 64.f, 100.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        -1.f, 0.f, -3.f,
+        -25.f, -64.f, -100.f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_NEG, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Rsqrt_Float32_CpuAcc_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.f, 4.f, 16.f,
+        25.f, 64.f, 100.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        1.f, 0.5f, 0.25f,
+        0.2f, 0.125f, 0.1f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_RSQRT, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Sin_Float32_CpuAcc_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        0.0f, 1.0f, 16.0f,
+        0.5f, 36.0f, -1.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        0.0f, 0.8414709848f, -0.28790331666f,
+        0.4794255386f, -0.99177885344f, -0.8414709848f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SIN, backends, inputValues, expectedOutputValues);
+}
+} // TEST_SUITE("ElementwiseUnary_CpuAccTests")
+
+TEST_SUITE("ElementwiseUnary_CpuRefTests")
+{
+
+TEST_CASE ("Abs_Float32_CpuRef_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        -0.1f, -0.2f, -0.3f,
+        0.1f,  0.2f,  0.3f
+    };
+    // Calculate output data
+    std::vector<float> expectedOutputValues(inputValues.size());
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        expectedOutputValues[i] = std::abs(inputValues[i]);
+    }
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_ABS, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Exp_Float32_CpuRef_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        5.0f, 4.0f,
+        3.0f, 2.0f,
+        1.0f, 1.1f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        148.413159102577f, 54.598150033144f,
+        20.085536923188f,  7.389056098931f,
+        2.718281828459f,  3.004166023946f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_EXP, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Log_Float32_CpuRef_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.0f, 1.0f,  2.0f,
+        3.0f,  4.0f, 2.71828f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        0.f,  0.f,  0.69314718056f,
+        1.09861228867f, 1.38629436112f, 0.99999932734f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_LOG, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Neg_Float32_CpuRef_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.f, 0.f, 3.f,
+        25.f, 64.f, 100.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        -1.f, 0.f, -3.f,
+        -25.f, -64.f, -100.f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_NEG, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Rsqrt_Float32_CpuRef_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        1.f, 4.f, 16.f,
+        25.f, 64.f, 100.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+        1.f, 0.5f, 0.25f,
+        0.2f, 0.125f, 0.1f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_RSQRT, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Sqrt_Float32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+        9.0f, 4.25f, 81.9f,
+        0.1f,  0.9f,  169.0f
+    };
+    // Calculate output data
+    std::vector<float> expectedOutputValues(inputValues.size());
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        expectedOutputValues[i] = std::sqrt(inputValues[i]);
+    }
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SQRT, backends, inputValues, expectedOutputValues);
+}
+
+TEST_CASE ("Sin_Float32_CpuRef_Test")
+{
+    // Create the ArmNN Delegate
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    // Set input data
+    std::vector<float> inputValues
+    {
+            0.0f, 1.0f, 16.0f,
+            0.5f, 36.0f, -1.f
+    };
+    // Set output data
+    std::vector<float> expectedOutputValues
+    {
+            0.0f, 0.8414709848f, -0.28790331666f,
+            0.4794255386f, -0.99177885344f, -0.8414709848f
+    };
+
+    ElementwiseUnaryFP32Test(tflite::BuiltinOperator_SIN, backends, inputValues, expectedOutputValues);
+}
+} // TEST_SUITE("ElementwiseUnary_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ElementwiseUnaryTestHelper.hpp b/delegate/test/ElementwiseUnaryTestHelper.hpp
new file mode 100644
index 0000000..f6a534a
--- /dev/null
+++ b/delegate/test/ElementwiseUnaryTestHelper.hpp
@@ -0,0 +1,189 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateElementwiseUnaryTfLiteModel(tflite::BuiltinOperator unaryOperatorCode,
+                                                    tflite::TensorType tensorType,
+                                                    const std::vector <int32_t>& tensorShape)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
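+    // The TfLite schema requires buffer 0 to be an empty sentinel buffer; tensors
+    // that carry no constant data reference it implicitly.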
+    buffers[0] = CreateBuffer(flatBufferBuilder);
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
+                              tensorType);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(), tensorShape.size()),
+                              tensorType);
+
+    // create operator
+    const std::vector<int> operatorInputs{0};
+    const std::vector<int> operatorOutputs{1};
+    flatbuffers::Offset <Operator> unaryOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
+
+    const std::vector<int> subgraphInputs{0};
+    const std::vector<int> subgraphOutputs{1};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&unaryOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Elementwise Unary Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, unaryOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void ElementwiseUnaryFP32Test(tflite::BuiltinOperator unaryOperatorCode,
+                              std::vector<armnn::BackendId>& backends,
+                              std::vector<float>& inputValues,
+                              std::vector<float>& expectedOutputValues)
+{
+    using namespace tflite;
+    std::vector<int32_t> inputShape  { { 3, 1, 2} };
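+    // All of the FP32 unary tests feed six values, matching this fixed 3x1x2 shape.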
+    std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
+                                                                      ::tflite::TensorType_FLOAT32,
+                                                                      inputShape);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
+    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, inputShape, expectedOutputValues);
+
+    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.reset(nullptr);
+}
+
+void ElementwiseUnaryBoolTest(tflite::BuiltinOperator unaryOperatorCode,
+                              std::vector<armnn::BackendId>& backends,
+                              std::vector<int32_t>& inputShape,
+                              std::vector<bool>& inputValues,
+                              std::vector<bool>& expectedOutputValues)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
+                                                                      ::tflite::TensorType_BOOL,
+                                                                      inputShape);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
+    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data. Boolean values need to call CompareData directly rather
+    // than CompareOutputData, because std::vector<bool> stores its elements as
+    // packed bits rather than as individually addressable bools.
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<bool>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<bool>(armnnDelegateOutputId);
+
+    armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size());
+    armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+
+    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
diff --git a/delegate/test/FillTest.cpp b/delegate/test/FillTest.cpp
new file mode 100644
index 0000000..a12715c
--- /dev/null
+++ b/delegate/test/FillTest.cpp
@@ -0,0 +1,221 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "FillTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void Fill2dTest(std::vector<armnn::BackendId>& backends,
+                tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
+                float fill = 2.0f)
+{
+    std::vector<int32_t> inputShape { 2 };
+    std::vector<int32_t> tensorShape { 2, 2 };
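+    // "inputShape" is the shape of the FILL dims tensor (one entry per output
+    // dimension); "tensorShape" is both its constant content and the output shape.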
+    std::vector<float> expectedOutputValues = { fill, fill,
+                                                fill, fill };
+
+    FillTest<float>(fillOperatorCode,
+                    ::tflite::TensorType_FLOAT32,
+                    backends,
+                    inputShape,
+                    tensorShape,
+                    expectedOutputValues,
+                    fill);
+}
+
+void Fill3dTest(std::vector<armnn::BackendId>& backends,
+                tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
+                float fill = 5.0f)
+{
+    std::vector<int32_t> inputShape { 3 };
+    std::vector<int32_t> tensorShape { 3, 3, 3 };
+    std::vector<float> expectedOutputValues = { fill, fill, fill,
+                                                fill, fill, fill,
+                                                fill, fill, fill,
+
+                                                fill, fill, fill,
+                                                fill, fill, fill,
+                                                fill, fill, fill,
+
+                                                fill, fill, fill,
+                                                fill, fill, fill,
+                                                fill, fill, fill };
+
+    FillTest<float>(fillOperatorCode,
+                    ::tflite::TensorType_FLOAT32,
+                    backends,
+                    inputShape,
+                    tensorShape,
+                    expectedOutputValues,
+                    fill);
+}
+
+void Fill4dTest(std::vector<armnn::BackendId>& backends,
+                tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
+                float fill = 3.0f)
+{
+    std::vector<int32_t> inputShape { 4 };
+    std::vector<int32_t> tensorShape { 2, 2, 4, 4 };
+    std::vector<float> expectedOutputValues = { fill, fill, fill, fill,
+                                                fill, fill, fill, fill,
+                                                fill, fill, fill, fill,
+                                                fill, fill, fill, fill,
+
+                                                fill, fill, fill, fill,
+                                                fill, fill, fill, fill,
+                                                fill, fill, fill, fill,
+                                                fill, fill, fill, fill,
+
+                                                fill, fill, fill, fill,
+                                                fill, fill, fill, fill,
+                                                fill, fill, fill, fill,
+                                                fill, fill, fill, fill,
+
+                                                fill, fill, fill, fill,
+                                                fill, fill, fill, fill,
+                                                fill, fill, fill, fill,
+                                                fill, fill, fill, fill };
+
+    FillTest<float>(fillOperatorCode,
+                    ::tflite::TensorType_FLOAT32,
+                    backends,
+                    inputShape,
+                    tensorShape,
+                    expectedOutputValues,
+                    fill);
+}
+
+void FillInt32Test(std::vector<armnn::BackendId>& backends,
+                   tflite::BuiltinOperator fillOperatorCode = tflite::BuiltinOperator_FILL,
+                   int32_t fill = 2)
+{
+    std::vector<int32_t> inputShape { 2 };
+    std::vector<int32_t> tensorShape { 2, 2 };
+    std::vector<int32_t> expectedOutputValues = { fill, fill,
+                                                  fill, fill };
+
+    FillTest<int32_t>(fillOperatorCode,
+                      ::tflite::TensorType_INT32,
+                      backends,
+                      inputShape,
+                      tensorShape,
+                      expectedOutputValues,
+                      fill);
+}
+
+TEST_SUITE("Fill_CpuRefTests")
+{
+
+TEST_CASE ("Fill2d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Fill2dTest(backends);
+}
+
+TEST_CASE ("Fill3d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Fill3dTest(backends);
+}
+
+TEST_CASE ("Fill3d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Fill3dTest(backends);
+}
+
+TEST_CASE ("Fill4d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Fill4dTest(backends);
+}
+
+TEST_CASE ("FillInt32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    FillInt32Test(backends);
+}
+
+} // TEST_SUITE("Fill_CpuRefTests")
+
+TEST_SUITE("Fill_CpuAccTests")
+{
+
+TEST_CASE ("Fill2d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Fill2dTest(backends);
+}
+
+TEST_CASE ("Fill3d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Fill3dTest(backends);
+}
+
+TEST_CASE ("Fill3d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Fill3dTest(backends);
+}
+
+TEST_CASE ("Fill4d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Fill4dTest(backends);
+}
+
+TEST_CASE ("FillInt32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    FillInt32Test(backends);
+}
+
+} // TEST_SUITE("Fill_CpuAccTests")
+
+TEST_SUITE("Fill_GpuAccTests")
+{
+
+TEST_CASE ("Fill2d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Fill2dTest(backends);
+}
+
+TEST_CASE ("Fill3d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Fill3dTest(backends);
+}
+
+TEST_CASE ("Fill3d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Fill3dTest(backends);
+}
+
+TEST_CASE ("Fill4d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Fill4dTest(backends);
+}
+
+TEST_CASE ("FillInt32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    FillInt32Test(backends);
+}
+
+} // TEST_SUITE("Fill_GpuAccTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/FillTestHelper.hpp b/delegate/test/FillTestHelper.hpp
new file mode 100644
index 0000000..c8aadb0
--- /dev/null
+++ b/delegate/test/FillTestHelper.hpp
@@ -0,0 +1,159 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename T>
+std::vector<char> CreateFillTfLiteModel(tflite::BuiltinOperator fillOperatorCode,
+                                        tflite::TensorType tensorType,
+                                        const std::vector<int32_t>& inputShape,
+                                        const std::vector <int32_t>& tensorShape,
+                                        const std::vector<T> fillValue)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(tensorShape.data()),
+                                                    sizeof(int32_t) * tensorShape.size())));
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(fillValue.data()),
+                                                    sizeof(T) * fillValue.size())));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
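+    // Buffer layout: 0 is the empty sentinel, 1 holds the constant "dims" input,
+    // 2 holds the scalar fill value and 3 backs the output tensor.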
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
+                                                                      inputShape.size()),
+                              tflite::TensorType_INT32,
+                              1,
+                              flatBufferBuilder.CreateString("dims"));
+
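+    // The FILL value input is a rank-0 scalar, so its shape is empty.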
+    std::vector<int32_t> fillShape = {};
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(fillShape.data(),
+                                                                      fillShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("value"));
+
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              3,
+                              flatBufferBuilder.CreateString("output"));
+
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_FillOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateFillOptions(flatBufferBuilder).Union();
+
+    // create operator
+    const std::vector<int> operatorInputs{ {0, 1} };
+    const std::vector<int> operatorOutputs{ 2 };
+    flatbuffers::Offset <Operator> fillOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{ {0, 1} };
+    const std::vector<int> subgraphOutputs{ 2 };
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&fillOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Fill Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         fillOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+
+}
+
+template <typename T>
+void FillTest(tflite::BuiltinOperator fillOperatorCode,
+              tflite::TensorType tensorType,
+              const std::vector<armnn::BackendId>& backends,
+              std::vector<int32_t >& inputShape,
+              std::vector<int32_t >& tensorShape,
+              std::vector<T>& expectedOutputValues,
+              T fillValue)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateFillTfLiteModel<T>(fillOperatorCode,
+                                                             tensorType,
+                                                             inputShape,
+                                                             tensorShape,
+                                                             {fillValue});
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+          (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+          (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                        armnnDelegateInterpreter,
+                                        tensorShape,
+                                        expectedOutputValues);
+}
+
+} // anonymous namespace
diff --git a/delegate/test/FullyConnectedTest.cpp b/delegate/test/FullyConnectedTest.cpp
new file mode 100644
index 0000000..3ef5ced
--- /dev/null
+++ b/delegate/test/FullyConnectedTest.cpp
@@ -0,0 +1,178 @@
+//
+// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "FullyConnectedTestHelper.hpp"
+
+namespace
+{
+
+void FullyConnectedFp32Test(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
+{
+    std::vector<int32_t> inputTensorShape   { 1, 4, 1, 1 };
+    std::vector<int32_t> weightsTensorShape { 1, 4 };
+    std::vector<int32_t> biasTensorShape    { 1 };
+    std::vector<int32_t> outputTensorShape  { 1, 1 };
+
+    std::vector<float> inputValues = { 10, 20, 30, 40 };
+    std::vector<float> weightsData = { 2, 3, 4, 5 };
+
+    std::vector<float> expectedOutputValues = { (400 + 10) };
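+    // 10*2 + 20*3 + 30*4 + 40*5 = 400, plus the bias of 10.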
+
+    // The bias is set to std::vector<float> biasData = { 10 } inside the model.
+    FullyConnectedTest<float>(backends,
+                              ::tflite::TensorType_FLOAT32,
+                              tflite::ActivationFunctionType_NONE,
+                              inputTensorShape,
+                              weightsTensorShape,
+                              biasTensorShape,
+                              outputTensorShape,
+                              inputValues,
+                              expectedOutputValues,
+                              weightsData,
+                              constantWeights);
+}
+
+void FullyConnectedActivationTest(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
+{
+    std::vector<int32_t> inputTensorShape   { 1, 4, 1, 1 };
+    std::vector<int32_t> weightsTensorShape { 1, 4 };
+    std::vector<int32_t> biasTensorShape    { 1 };
+    std::vector<int32_t> outputTensorShape  { 1, 1 };
+
+    std::vector<float> inputValues = { -10, 20, 30, 40 };
+    std::vector<float> weightsData = { 2, 3, 4, -5 };
+
+    std::vector<float> expectedOutputValues = { 0 };
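+    // -10*2 + 20*3 + 30*4 + 40*(-5) = -40, plus the bias of 10 gives -30,
+    // which ReLU clamps to 0.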
+
+    // The bias is set to std::vector<float> biasData = { 10 } inside the model.
+    FullyConnectedTest<float>(backends,
+                              ::tflite::TensorType_FLOAT32,
+                              tflite::ActivationFunctionType_RELU,
+                              inputTensorShape,
+                              weightsTensorShape,
+                              biasTensorShape,
+                              outputTensorShape,
+                              inputValues,
+                              expectedOutputValues,
+                              weightsData,
+                              constantWeights);
+}
+
+void FullyConnectedInt8Test(std::vector<armnn::BackendId>& backends, bool constantWeights = true)
+{
+    std::vector<int32_t> inputTensorShape   { 1, 4, 2, 1 };
+    std::vector<int32_t> weightsTensorShape { 1, 4 };
+    std::vector<int32_t> biasTensorShape    { 1 };
+    std::vector<int32_t> outputTensorShape  { 2, 1 };
+
+    std::vector<int8_t> inputValues = { 1, 2, 3, 4, 5, 10, 15, 20 };
+    std::vector<int8_t> weightsData = { 2, 3, 4, 5 };
+
+    std::vector<int8_t> expectedOutputValues = { 25, 105 };  // (40 + 10) / 2, (200 + 10) / 2
+
+    // The bias is set to std::vector<int32_t> biasData = { 10 } inside the model.
+    // Input and weights use quantization scale 1.0f and offset 0 in the model;
+    // the output uses quantization scale 2.0f and offset 0.
+    FullyConnectedTest<int8_t>(backends,
+                                ::tflite::TensorType_INT8,
+                                tflite::ActivationFunctionType_NONE,
+                                inputTensorShape,
+                                weightsTensorShape,
+                                biasTensorShape,
+                                outputTensorShape,
+                                inputValues,
+                                expectedOutputValues,
+                                weightsData,
+                                constantWeights);
+}
+
+TEST_SUITE("FullyConnected_GpuAccTests")
+{
+
+TEST_CASE ("FullyConnected_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    FullyConnectedFp32Test(backends);
+}
+
+TEST_CASE ("FullyConnected_Int8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    FullyConnectedInt8Test(backends);
+}
+
+TEST_CASE ("FullyConnected_Activation_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    FullyConnectedActivationTest(backends);
+}
+
+} // End of TEST_SUITE("FullyConnected_GpuAccTests")
+
+TEST_SUITE("FullyConnected_CpuAccTests")
+{
+
+TEST_CASE ("FullyConnected_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    FullyConnectedFp32Test(backends);
+}
+
+TEST_CASE ("FullyConnected_Int8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    FullyConnectedInt8Test(backends);
+}
+
+TEST_CASE ("FullyConnected_Activation_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    FullyConnectedActivationTest(backends);
+}
+
+} // End of TEST_SUITE("FullyConnected_CpuAccTests")
+
+TEST_SUITE("FullyConnected_CpuRefTests")
+{
+
+TEST_CASE ("FullyConnected_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    FullyConnectedFp32Test(backends);
+}
+
+TEST_CASE ("FullyConnected_Int8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    FullyConnectedInt8Test(backends);
+}
+
+TEST_CASE ("FullyConnected_Activation_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    FullyConnectedActivationTest(backends);
+}
+
+TEST_CASE ("FullyConnected_Weights_As_Inputs_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    FullyConnectedFp32Test(backends, false);
+}
+
+TEST_CASE ("FullyConnected_Weights_As_Inputs_Int8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    FullyConnectedInt8Test(backends, false);
+}
+
+TEST_CASE ("FullyConnected_Weights_As_Inputs_Activation_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    FullyConnectedActivationTest(backends, false);
+}
+
+} // End of TEST_SUITE("FullyConnected_CpuRefTests")
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/FullyConnectedTestHelper.hpp b/delegate/test/FullyConnectedTestHelper.hpp
new file mode 100644
index 0000000..d6bbd93
--- /dev/null
+++ b/delegate/test/FullyConnectedTestHelper.hpp
@@ -0,0 +1,255 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename T>
+std::vector<char> CreateFullyConnectedTfLiteModel(tflite::TensorType tensorType,
+                                                  tflite::ActivationFunctionType activationType,
+                                                  const std::vector <int32_t>& inputTensorShape,
+                                                  const std::vector <int32_t>& weightsTensorShape,
+                                                  const std::vector <int32_t>& biasTensorShape,
+                                                  std::vector <int32_t>& outputTensorShape,
+                                                  std::vector <T>& weightsData,
+                                                  bool constantWeights = true,
+                                                  float quantScale = 1.0f,
+                                                  int quantOffset  = 0,
+                                                  float outputQuantScale = 2.0f,
+                                                  int outputQuantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+    std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder);
+    buffers[1] = CreateBuffer(flatBufferBuilder);
+
+    auto biasTensorType = ::tflite::TensorType_FLOAT32;
+    if (tensorType == ::tflite::TensorType_INT8)
+    {
+        biasTensorType = ::tflite::TensorType_INT32;
+    }
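+    // Quantized int8 models accumulate into 32-bit integers, so the bias tensor
+    // must be int32 rather than int8.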
+    if (constantWeights)
+    {
+        buffers[2] = CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(weightsData.data()),
+                                                    sizeof(T) * weightsData.size()));
+
+        if (tensorType == ::tflite::TensorType_INT8)
+        {
+            std::vector<int32_t> biasData = { 10 };
+            buffers[3] = CreateBuffer(flatBufferBuilder,
+                                      flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
+                                                                     sizeof(int32_t) * biasData.size()));
+
+        }
+        else
+        {
+            std::vector<float> biasData = { 10 };
+            buffers[3] = CreateBuffer(flatBufferBuilder,
+                                      flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
+                                                                     sizeof(float) * biasData.size()));
+        }
+    }
+    else
+    {
+        buffers[2] = CreateBuffer(flatBufferBuilder);
+        buffers[3] = CreateBuffer(flatBufferBuilder);
+    }
+    buffers[4] = CreateBuffer(flatBufferBuilder);
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto outputQuantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
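+    // The output scale defaults to 2.0f, which is why the int8 tests expect the
+    // accumulated results halved.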
+
+    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input_0"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(weightsTensorShape.data(),
+                                                                      weightsTensorShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("weights"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(),
+                                                                      biasTensorShape.size()),
+                              biasTensorType,
+                              3,
+                              flatBufferBuilder.CreateString("bias"),
+                              quantizationParameters);
+
+    tensors[3] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              4,
+                              flatBufferBuilder.CreateString("output"),
+                              outputQuantizationParameters);
+
+
+    // create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_FullyConnectedOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions =
+        CreateFullyConnectedOptions(flatBufferBuilder,
+                                    activationType,
+                                    FullyConnectedOptionsWeightsFormat_DEFAULT, false).Union();
+
+    const std::vector<int> operatorInputs{0, 1, 2};
+    const std::vector<int> operatorOutputs{3};
+    flatbuffers::Offset <Operator> fullyConnectedOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType, operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{0, 1, 2};
+    const std::vector<int> subgraphOutputs{3};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&fullyConnectedOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: FullyConnected Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         tflite::BuiltinOperator_FULLY_CONNECTED);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void FullyConnectedTest(std::vector<armnn::BackendId>& backends,
+                        tflite::TensorType tensorType,
+                        tflite::ActivationFunctionType activationType,
+                        const std::vector <int32_t>& inputTensorShape,
+                        const std::vector <int32_t>& weightsTensorShape,
+                        const std::vector <int32_t>& biasTensorShape,
+                        std::vector <int32_t>& outputTensorShape,
+                        std::vector <T>& inputValues,
+                        std::vector <T>& expectedOutputValues,
+                        std::vector <T>& weightsData,
+                        bool constantWeights = true,
+                        float quantScale = 1.0f,
+                        int quantOffset  = 0)
+{
+    using namespace tflite;
+
+    std::vector<char> modelBuffer = CreateFullyConnectedTfLiteModel(tensorType,
+                                                                    activationType,
+                                                                    inputTensorShape,
+                                                                    weightsTensorShape,
+                                                                    biasTensorShape,
+                                                                    outputTensorShape,
+                                                                    weightsData,
+                                                                    constantWeights,
+                                                                    quantScale,
+                                                                    quantOffset);
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                     armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+    if (!constantWeights)
+    {
+        armnnDelegate::FillInput<T>(tfLiteInterpreter, 1, weightsData);
+        armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 1, weightsData);
+
+        if (tensorType == ::tflite::TensorType_INT8)
+        {
+            std::vector <int32_t> biasData = {10};
+            armnnDelegate::FillInput<int32_t>(tfLiteInterpreter, 2, biasData);
+            armnnDelegate::FillInput<int32_t>(armnnDelegateInterpreter, 2, biasData);
+        }
+        else
+        {
+            std::vector<float> biasData = {10};
+            armnnDelegate::FillInput<float>(tfLiteInterpreter, 2, biasData);
+            armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 2, biasData);
+        }
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                        armnnDelegateInterpreter,
+                                        outputTensorShape,
+                                        expectedOutputValues);
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/GatherNdTest.cpp b/delegate/test/GatherNdTest.cpp
new file mode 100644
index 0000000..066248f
--- /dev/null
+++ b/delegate/test/GatherNdTest.cpp
@@ -0,0 +1,113 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GatherNdTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+// GATHER_ND Operator
+void GatherNdUint8Test(std::vector<armnn::BackendId>& backends)
+{
+
+    std::vector<int32_t> paramsShape{ 5, 2 };
+    std::vector<int32_t> indicesShape{ 3, 1 };
+    std::vector<int32_t> expectedOutputShape{ 3, 2 };
+
+    std::vector<uint8_t> paramsValues{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+    std::vector<int32_t> indicesValues{ 1, 0, 4 };
+    std::vector<uint8_t> expectedOutputValues{ 3, 4, 1, 2, 9, 10 };
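+    // With indices of shape { 3, 1 }, each index selects a whole row of the 5x2 params tensor:
+    // row 1 -> { 3, 4 }, row 0 -> { 1, 2 }, row 4 -> { 9, 10 }.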
+
+    GatherNdTest<uint8_t>(::tflite::TensorType_UINT8,
+                          backends,
+                          paramsShape,
+                          indicesShape,
+                          expectedOutputShape,
+                          paramsValues,
+                          indicesValues,
+                          expectedOutputValues);
+}
+
+void GatherNdFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> paramsShape{ 5, 2 };
+    std::vector<int32_t> indicesShape{ 3, 1 };
+    std::vector<int32_t> expectedOutputShape{ 3, 2 };
+
+    std::vector<float>   paramsValues{ 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.10f };
+    std::vector<int32_t> indicesValues{ 1, 0, 4 };
+    std::vector<float>   expectedOutputValues{ 3.3f, 4.4f, 1.1f, 2.2f, 9.9f, 10.10f };
+
+    GatherNdTest<float>(::tflite::TensorType_FLOAT32,
+                        backends,
+                        paramsShape,
+                        indicesShape,
+                        expectedOutputShape,
+                        paramsValues,
+                        indicesValues,
+                        expectedOutputValues);
+}
+
+// GATHER_ND Test Suite
+TEST_SUITE("GATHER_ND_CpuRefTests")
+{
+
+TEST_CASE ("GATHER_ND_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    GatherNdUint8Test(backends);
+}
+
+TEST_CASE ("GATHER_ND_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    GatherNdFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("GATHER_ND_CpuAccTests")
+{
+
+TEST_CASE ("GATHER_ND_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    GatherNdUint8Test(backends);
+}
+
+TEST_CASE ("GATHER_ND_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    GatherNdFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("GATHER_ND_GpuAccTests")
+{
+
+TEST_CASE ("GATHER_ND_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    GatherNdUint8Test(backends);
+}
+
+TEST_CASE ("GATHER_ND_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    GatherNdFp32Test(backends);
+}
+
+}
+// End of GATHER_ND Test Suite
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/GatherNdTestHelper.hpp b/delegate/test/GatherNdTestHelper.hpp
new file mode 100644
index 0000000..7b1595b
--- /dev/null
+++ b/delegate/test/GatherNdTestHelper.hpp
@@ -0,0 +1,181 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateGatherNdTfLiteModel(tflite::TensorType tensorType,
+                                            std::vector<int32_t>& paramsShape,
+                                            std::vector<int32_t>& indicesShape,
+                                            const std::vector<int32_t>& expectedOutputShape,
+                                            float quantScale = 1.0f,
+                                            int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
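+    // By TfLite convention buffer 0 is the empty sentinel buffer; buffers 1-3 back the
+    // params, indices and output tensors. All are empty here because no data is constant.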
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+             CreateQuantizationParameters(flatBufferBuilder,
+                                          0,
+                                          0,
+                                          flatBufferBuilder.CreateVector<float>({quantScale}),
+                                          flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(paramsShape.data(),
+                                                                      paramsShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("params"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
+                                                                      indicesShape.size()),
+                              ::tflite::TensorType_INT32,
+                              2,
+                              flatBufferBuilder.CreateString("indices"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(expectedOutputShape.data(),
+                                                                      expectedOutputShape.size()),
+                              tensorType,
+                              3,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // create operator
+    tflite::BuiltinOptions    operatorBuiltinOptionsType = tflite::BuiltinOptions_GatherNdOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions     = CreateGatherNdOptions(flatBufferBuilder).Union();
+
+    const std::vector<int>        operatorInputs{{0, 1}};
+    const std::vector<int>        operatorOutputs{2};
+    flatbuffers::Offset<Operator> controlOperator        =
+                                      CreateOperator(flatBufferBuilder,
+                                                     0,
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
+                                                                                             operatorInputs.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+                                                                                             operatorOutputs.size()),
+                                                     operatorBuiltinOptionsType,
+                                                     operatorBuiltinOptions);
+
+    const std::vector<int>        subgraphInputs{{0, 1}};
+    const std::vector<int>        subgraphOutputs{2};
+    flatbuffers::Offset<SubGraph> subgraph               =
+                                      CreateSubGraph(flatBufferBuilder,
+                                                     flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(),
+                                                                                             subgraphInputs.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
+                                                                                             subgraphOutputs.size()),
+                                                     flatBufferBuilder.CreateVector(&controlOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+                                             flatBufferBuilder.CreateString("ArmnnDelegate: GATHER_ND Operator Model");
+    flatbuffers::Offset<OperatorCode>        operatorCode     = CreateOperatorCode(flatBufferBuilder,
+                                                                                   BuiltinOperator_GATHER_ND);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+                                   CreateModel(flatBufferBuilder,
+                                               TFLITE_SCHEMA_VERSION,
+                                               flatBufferBuilder.CreateVector(&operatorCode, 1),
+                                               flatBufferBuilder.CreateVector(&subgraph, 1),
+                                               modelDescription,
+                                               flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template<typename T>
+void GatherNdTest(tflite::TensorType tensorType,
+                  std::vector<armnn::BackendId>& backends,
+                  std::vector<int32_t>& paramsShape,
+                  std::vector<int32_t>& indicesShape,
+                  std::vector<int32_t>& expectedOutputShape,
+                  std::vector<T>& paramsValues,
+                  std::vector<int32_t>& indicesValues,
+                  std::vector<T>& expectedOutputValues,
+                  float quantScale = 1.0f,
+                  int quantOffset = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateGatherNdTfLiteModel(tensorType,
+                                                              paramsShape,
+                                                              indicesShape,
+                                                              expectedOutputShape,
+                                                              quantScale,
+                                                              quantOffset);
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                     armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use the ArmNN delegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, paramsValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, paramsValues);
+    armnnDelegate::FillInput<int32_t>(tfLiteInterpreter, 1, indicesValues);
+    armnnDelegate::FillInput<int32_t>(armnnDelegateInterpreter, 1, indicesValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                        armnnDelegateInterpreter,
+                                        expectedOutputShape,
+                                        expectedOutputValues,
+                                        0);
+
+    tfLiteInterpreter.reset(nullptr);
+    armnnDelegateInterpreter.reset(nullptr);
+}
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/GatherTest.cpp b/delegate/test/GatherTest.cpp
new file mode 100644
index 0000000..e4012b4
--- /dev/null
+++ b/delegate/test/GatherTest.cpp
@@ -0,0 +1,117 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "GatherTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+// GATHER Operator
+void GatherUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> paramsShape{8};
+    std::vector<int32_t> indicesShape{3};
+    std::vector<int32_t> expectedOutputShape{3};
+
+    int32_t              axis = 0;
+    std::vector<uint8_t> paramsValues{1, 2, 3, 4, 5, 6, 7, 8};
+    std::vector<int32_t> indicesValues{7, 6, 5};
+    std::vector<uint8_t> expectedOutputValues{8, 7, 6};
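+    // With axis 0, each index selects a single element: index 7 -> 8, 6 -> 7, 5 -> 6.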
+
+    GatherTest<uint8_t>(::tflite::TensorType_UINT8,
+                        backends,
+                        paramsShape,
+                        indicesShape,
+                        expectedOutputShape,
+                        axis,
+                        paramsValues,
+                        indicesValues,
+                        expectedOutputValues);
+}
+
+void GatherFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> paramsShape{8};
+    std::vector<int32_t> indicesShape{3};
+    std::vector<int32_t> expectedOutputShape{3};
+
+    int32_t              axis = 0;
+    std::vector<float>   paramsValues{1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f};
+    std::vector<int32_t> indicesValues{7, 6, 5};
+    std::vector<float>   expectedOutputValues{8.8f, 7.7f, 6.6f};
+
+    GatherTest<float>(::tflite::TensorType_FLOAT32,
+                      backends,
+                      paramsShape,
+                      indicesShape,
+                      expectedOutputShape,
+                      axis,
+                      paramsValues,
+                      indicesValues,
+                      expectedOutputValues);
+}
+
+// GATHER Test Suite
+TEST_SUITE("GATHER_CpuRefTests")
+{
+
+TEST_CASE ("GATHER_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    GatherUint8Test(backends);
+}
+
+TEST_CASE ("GATHER_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    GatherFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("GATHER_CpuAccTests")
+{
+
+TEST_CASE ("GATHER_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    GatherUint8Test(backends);
+}
+
+TEST_CASE ("GATHER_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    GatherFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("GATHER_GpuAccTests")
+{
+
+TEST_CASE ("GATHER_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    GatherUint8Test(backends);
+}
+
+TEST_CASE ("GATHER_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    GatherFp32Test(backends);
+}
+
+}
+// End of GATHER Test Suite
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/GatherTestHelper.hpp b/delegate/test/GatherTestHelper.hpp
new file mode 100644
index 0000000..41e3b55
--- /dev/null
+++ b/delegate/test/GatherTestHelper.hpp
@@ -0,0 +1,184 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateGatherTfLiteModel(tflite::TensorType tensorType,
+                                          std::vector<int32_t>& paramsShape,
+                                          std::vector<int32_t>& indicesShape,
+                                          const std::vector<int32_t>& expectedOutputShape,
+                                          int32_t axis,
+                                          float quantScale = 1.0f,
+                                          int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+             CreateQuantizationParameters(flatBufferBuilder,
+                                          0,
+                                          0,
+                                          flatBufferBuilder.CreateVector<float>({quantScale}),
+                                          flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(paramsShape.data(),
+                                                                      paramsShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("params"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
+                                                                      indicesShape.size()),
+                              ::tflite::TensorType_INT32,
+                              2,
+                              flatBufferBuilder.CreateString("indices"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(expectedOutputShape.data(),
+                                                                      expectedOutputShape.size()),
+                              tensorType,
+                              3,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // create operator
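+    // The requested axis has to be encoded in GatherOptions; it defaults to 0 when omitted.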
+    tflite::BuiltinOptions    operatorBuiltinOptionsType = tflite::BuiltinOptions_GatherOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions     = CreateGatherOptions(flatBufferBuilder, axis).Union();
+
+    const std::vector<int>        operatorInputs{{0, 1}};
+    const std::vector<int>        operatorOutputs{2};
+    flatbuffers::Offset<Operator> controlOperator        =
+                                      CreateOperator(flatBufferBuilder,
+                                                     0,
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
+                                                                                             operatorInputs.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+                                                                                             operatorOutputs.size()),
+                                                     operatorBuiltinOptionsType,
+                                                     operatorBuiltinOptions);
+
+    const std::vector<int>        subgraphInputs{{0, 1}};
+    const std::vector<int>        subgraphOutputs{2};
+    flatbuffers::Offset<SubGraph> subgraph               =
+                                      CreateSubGraph(flatBufferBuilder,
+                                                     flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(),
+                                                                                             subgraphInputs.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
+                                                                                             subgraphOutputs.size()),
+                                                     flatBufferBuilder.CreateVector(&controlOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+                                                 flatBufferBuilder.CreateString("ArmnnDelegate: GATHER Operator Model");
+    flatbuffers::Offset<OperatorCode>        operatorCode     = CreateOperatorCode(flatBufferBuilder,
+                                                                                   BuiltinOperator_GATHER);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+                                   CreateModel(flatBufferBuilder,
+                                               TFLITE_SCHEMA_VERSION,
+                                               flatBufferBuilder.CreateVector(&operatorCode, 1),
+                                               flatBufferBuilder.CreateVector(&subgraph, 1),
+                                               modelDescription,
+                                               flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template<typename T>
+void GatherTest(tflite::TensorType tensorType,
+                std::vector<armnn::BackendId>& backends,
+                std::vector<int32_t>& paramsShape,
+                std::vector<int32_t>& indicesShape,
+                std::vector<int32_t>& expectedOutputShape,
+                int32_t axis,
+                std::vector<T>& paramsValues,
+                std::vector<int32_t>& indicesValues,
+                std::vector<T>& expectedOutputValues,
+                float quantScale = 1.0f,
+                int quantOffset = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateGatherTfLiteModel(tensorType,
+                                                            paramsShape,
+                                                            indicesShape,
+                                                            expectedOutputShape,
+                                                            axis,
+                                                            quantScale,
+                                                            quantOffset);
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                     armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use the ArmNN delegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, paramsValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, paramsValues);
+    armnnDelegate::FillInput<int32_t>(tfLiteInterpreter, 1, indicesValues);
+    armnnDelegate::FillInput<int32_t>(armnnDelegateInterpreter, 1, indicesValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                        armnnDelegateInterpreter,
+                                        expectedOutputShape,
+                                        expectedOutputValues,
+                                        0);
+
+    tfLiteInterpreter.reset(nullptr);
+    armnnDelegateInterpreter.reset(nullptr);
+}
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/LogicalTest.cpp b/delegate/test/LogicalTest.cpp
new file mode 100644
index 0000000..57bbd31
--- /dev/null
+++ b/delegate/test/LogicalTest.cpp
@@ -0,0 +1,226 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ElementwiseUnaryTestHelper.hpp"
+#include "LogicalTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void LogicalBinaryAndBoolTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2 };
+
+    // Set input and output values
+    std::vector<bool> input0Values { 0, 0, 1, 1 };
+    std::vector<bool> input1Values { 0, 1, 0, 1 };
+    std::vector<bool> expectedOutputValues { 0, 0, 0, 1 };
+
+    LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_AND,
+                            ::tflite::TensorType_BOOL,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void LogicalBinaryAndBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2 };
+
+    std::vector<bool> input0Values { 0, 1, 0, 1 };
+    std::vector<bool> input1Values { 1 };
+    std::vector<bool> expectedOutputValues { 0, 1, 0, 1 };
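+    // The single input1 value is broadcast across the 1x2x2 shape, so ANDing with 1
+    // leaves input0 unchanged.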
+
+    LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_AND,
+                            ::tflite::TensorType_BOOL,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void LogicalBinaryOrBoolTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 2, 2 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2 };
+
+    std::vector<bool> input0Values { 0, 0, 1, 1 };
+    std::vector<bool> input1Values { 0, 1, 0, 1 };
+    std::vector<bool> expectedOutputValues { 0, 1, 1, 1 };
+
+    LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_OR,
+                            ::tflite::TensorType_BOOL,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+void LogicalBinaryOrBroadcastTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> input0Shape { 1, 2, 2 };
+    std::vector<int32_t> input1Shape { 1, 1, 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 2, 2 };
+
+    std::vector<bool> input0Values { 0, 1, 0, 1 };
+    std::vector<bool> input1Values { 1 };
+    std::vector<bool> expectedOutputValues { 1, 1, 1, 1 };
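+    // ORing with a broadcast 1 always yields 1, whatever input0 holds.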
+
+    LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_OR,
+                            ::tflite::TensorType_BOOL,
+                            backends,
+                            input0Shape,
+                            input1Shape,
+                            expectedOutputShape,
+                            input0Values,
+                            input1Values,
+                            expectedOutputValues);
+}
+
+// The LOGICAL_NOT operator uses the ElementwiseUnary layer and descriptor but is still classed as a logical operator.
+void LogicalNotBoolTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 1, 2, 2 };
+
+    std::vector<bool> inputValues { 0, 1, 0, 1 };
+    std::vector<bool> expectedOutputValues { 1, 0, 1, 0 };
+
+    ElementwiseUnaryBoolTest(tflite::BuiltinOperator_LOGICAL_NOT,
+                             backends,
+                             inputShape,
+                             inputValues,
+                             expectedOutputValues);
+}
+
+TEST_SUITE("LogicalBinaryTests_GpuAccTests")
+{
+
+TEST_CASE ("LogicalBinary_AND_Bool_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    LogicalBinaryAndBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_AND_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    LogicalBinaryAndBroadcastTest(backends);
+}
+
+TEST_CASE ("Logical_NOT_Bool_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    LogicalNotBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_OR_Bool_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    LogicalBinaryOrBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_OR_Broadcast_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    LogicalBinaryOrBroadcastTest(backends);
+}
+
+}
+
+TEST_SUITE("LogicalBinaryTests_CpuAccTests")
+{
+
+TEST_CASE ("LogicalBinary_AND_Bool_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    LogicalBinaryAndBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_AND_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    LogicalBinaryAndBroadcastTest(backends);
+}
+
+TEST_CASE ("Logical_NOT_Bool_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    LogicalNotBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_OR_Bool_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    LogicalBinaryOrBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_OR_Broadcast_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    LogicalBinaryOrBroadcastTest(backends);
+}
+
+}
+
+TEST_SUITE("LogicalBinaryTests_CpuRefTests")
+{
+
+TEST_CASE ("LogicalBinary_AND_Bool_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    LogicalBinaryAndBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_AND_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    LogicalBinaryAndBroadcastTest(backends);
+}
+
+TEST_CASE ("Logical_NOT_Bool_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    LogicalNotBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_OR_Bool_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    LogicalBinaryOrBoolTest(backends);
+}
+
+TEST_CASE ("LogicalBinary_OR_Broadcast_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    LogicalBinaryOrBroadcastTest(backends);
+}
+
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/LogicalTestHelper.hpp b/delegate/test/LogicalTestHelper.hpp
new file mode 100644
index 0000000..2f2ae7b
--- /dev/null
+++ b/delegate/test/LogicalTestHelper.hpp
@@ -0,0 +1,201 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateLogicalBinaryTfLiteModel(tflite::BuiltinOperator logicalOperatorCode,
+                                                 tflite::TensorType tensorType,
+                                                 const std::vector <int32_t>& input0TensorShape,
+                                                 const std::vector <int32_t>& input1TensorShape,
+                                                 const std::vector <int32_t>& outputTensorShape,
+                                                 float quantScale = 1.0f,
+                                                 int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
+                                                                      input0TensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input_0"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
+                                                                      input1TensorShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("input_1"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              3,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
+    flatbuffers::Offset<void> operatorBuiltinOptions = 0;
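+    // Map the logical operator onto its builtin options; any other operator code keeps
+    // BuiltinOptions_NONE and a null options offset.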
+    switch (logicalOperatorCode)
+    {
+        case BuiltinOperator_LOGICAL_AND:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_LogicalAndOptions;
+            operatorBuiltinOptions = CreateLogicalAndOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_LOGICAL_OR:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_LogicalOrOptions;
+            operatorBuiltinOptions = CreateLogicalOrOptions(flatBufferBuilder).Union();
+            break;
+        }
+        default:
+            break;
+    }
+    const std::vector<int32_t> operatorInputs{ {0, 1} };
+    const std::vector<int32_t> operatorOutputs{ 2 };
+    flatbuffers::Offset <Operator> logicalBinaryOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{ {0, 1} };
+    const std::vector<int> subgraphOutputs{ 2 };
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&logicalBinaryOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Logical Binary Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, logicalOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void LogicalBinaryTest(tflite::BuiltinOperator logicalOperatorCode,
+                       tflite::TensorType tensorType,
+                       std::vector<armnn::BackendId>& backends,
+                       std::vector<int32_t>& input0Shape,
+                       std::vector<int32_t>& input1Shape,
+                       std::vector<int32_t>& expectedOutputShape,
+                       std::vector<T>& input0Values,
+                       std::vector<T>& input1Values,
+                       std::vector<T>& expectedOutputValues,
+                       float quantScale = 1.0f,
+                       int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateLogicalBinaryTfLiteModel(logicalOperatorCode,
+                                                                   tensorType,
+                                                                   input0Shape,
+                                                                   input1Shape,
+                                                                   expectedOutputShape,
+                                                                   quantScale,
+                                                                   quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data for the armnn interpreter
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
+
+    // Set input data for the tflite interpreter
+    armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
+    armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data. Comparing Boolean values is handled differently and needs to call the
+    // CompareData function directly, because std::vector<bool> stores its elements as packed bits.
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
+
+    armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size());
+    armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+
+    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/LstmTest.cpp b/delegate/test/LstmTest.cpp
new file mode 100644
index 0000000..1034a01
--- /dev/null
+++ b/delegate/test/LstmTest.cpp
@@ -0,0 +1,189 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "LstmTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void LstmTest(std::vector<armnn::BackendId>& backends)
+{
+    int32_t batchSize = 2;
+    int32_t inputSize = 2;
+    int32_t outputSize = 4;
+    // The cell size (numUnits) and outputSize are equal when there is no projection.
+    int32_t numUnits = outputSize;
+
+    std::vector<int32_t> inputShape {batchSize, inputSize};
+    std::vector<int32_t> cellStateInTensorInfo {batchSize, numUnits};
+    std::vector<int32_t> outputStateInTensorInfo {batchSize, outputSize};
+
+    std::vector<int32_t> scratchBufferTensorInfo {batchSize, numUnits * 4};
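+    // The scratch buffer holds the four gate activations (input, forget, cell and output),
+    // hence numUnits * 4; with CIFG enabled it would be numUnits * 3.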
+    std::vector<int32_t> cellStateOutTensorInfo {batchSize, numUnits};
+    std::vector<int32_t> outputStateOutTensorInfo {batchSize, outputSize};
+    std::vector<int32_t> outputTensorInfo {batchSize, outputSize};
+
+    std::vector<int32_t> tensorInfo4 {numUnits};
+    std::vector<int32_t> tensorInfo8 {numUnits, 2};
+    std::vector<int32_t> tensorInfo16 {numUnits, 4};
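+    // The tensorInfoN names encode element counts: 4 = numUnits, 8 = numUnits * inputSize,
+    // 16 = numUnits * outputSize.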
+
+    // tensorInfo8
+    bool hasInputToInputWeights = true;
+    std::vector<float> inputToInputWeights {-0.45018822f, -0.02338299f, -0.0870589f,
+                                            -0.34550029f, 0.04266912f, -0.15680569f,
+                                            -0.34856534f, 0.43890524f};
+
+    std::vector<float> inputToForgetWeights {0.09701663f, 0.20334584f, -0.50592935f,
+                                             -0.31343272f, -0.40032279f, 0.44781327f,
+                                             0.01387155f, -0.35593212f};
+
+    std::vector<float> inputToCellWeights {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f,
+                                           -0.20583314f, 0.44344562f, 0.22077113f,
+                                           -0.29909778f};
+
+    std::vector<float> inputToOutputWeights {-0.25065863f, -0.28290087f, 0.04613829f,
+                                             0.40525138f, 0.44272184f, 0.03897077f,
+                                             -0.1556896f, 0.19487578f};
+
+    // tensorInfo16
+    bool hasRecurrentToInputWeights = true;
+    std::vector<float> recurrentToInputWeights {-0.0063535f, -0.2042388f, 0.31454784f,
+                                                -0.35746509f, 0.28902304f, 0.08183324f,
+                                                -0.16555229f, 0.02286911f, -0.13566875f,
+                                                0.03034258f, 0.48091322f, -0.12528998f,
+                                                0.24077177f, -0.51332325f, -0.33502164f,
+                                                0.10629296f};
+
+    std::vector<float> recurrentToForgetWeights {-0.48684245f, -0.06655136f, 0.42224967f,
+                                                 0.2112639f, 0.27654213f, 0.20864892f,
+                                                 -0.07646349f, 0.45877004f, 0.00141793f,
+                                                 -0.14609534f, 0.36447752f, 0.09196436f,
+                                                 0.28053468f, 0.01560611f, -0.20127171f,
+                                                 -0.01140004f};
+
+    std::vector<float> recurrentToCellWeights {-0.3407414f, 0.24443203f, -0.2078532f,
+                                               0.26320225f, 0.05695659f, -0.00123841f,
+                                               -0.4744786f, -0.35869038f, -0.06418842f,
+                                               -0.13502428f, -0.501764f, 0.22830659f,
+                                               -0.46367589f, 0.26016325f, -0.03894562f,
+                                               -0.16368064f};
+
+    std::vector<float> recurrentToOutputWeights {0.43385774f, -0.17194885f, 0.2718237f,
+                                                 0.09215671f, 0.24107647f, -0.39835793f,
+                                                 0.18212086f, 0.01301402f, 0.48572797f,
+                                                 -0.50656658f, 0.20047462f, -0.20607421f,
+                                                 -0.51818722f, -0.15390486f, 0.0468148f,
+                                                 0.39922136f};
+    // tensorInfo4
+    bool hasCellToInputWeights = false;
+    std::vector<float> cellToInputWeights {};
+    bool hasCellToForgetWeights = false;
+    std::vector<float> cellToForgetWeights {};
+    bool hasCellToOutputWeights = false;
+    std::vector<float> cellToOutputWeights {};
+
+    bool hasInputGateBias = true;
+    std::vector<float> inputGateBias {0.0f, 0.0f, 0.0f, 0.0f};
+    std::vector<float> forgetGateBias {1.0f, 1.0f, 1.0f, 1.0f};
+    std::vector<float> cellBias {0.0f, 0.0f, 0.0f, 0.0f};
+    std::vector<float> outputGateBias {0.0f, 0.0f, 0.0f, 0.0f};
+
+    bool hasProjectionWeights = false;
+    std::vector<float> projectionWeights;
+    bool hasProjectionBias = false;
+    std::vector<float> projectionBias;
+
+    bool hasInputLayerNormWeights = false;
+    std::vector<float> inputLayerNormWeights;
+    bool hasForgetLayerNormWeights = false;
+    std::vector<float> forgetLayerNormWeights;
+    bool hasCellLayerNormWeights = false;
+    std::vector<float> cellLayerNormWeights;
+    bool hasOutputLayerNormWeights = false;
+    std::vector<float> outputLayerNormWeights;
+
+    std::vector<float> inputValues {2.0f, 3.0f, 3.0f, 4.0f};
+    std::vector<float> expectedOutputValues {-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
+                                             -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f};
+
+    tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+    float clippingThresCell = 0.f;
+    float clippingThresProj = 0.f;
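+    // Clipping thresholds of 0 disable cell state and projection clipping in TfLite.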
+
+    LstmTestImpl<float>(backends,
+                        ::tflite::TensorType_FLOAT32,
+                        batchSize,
+                        inputSize,
+                        outputSize,
+                        numUnits,
+                        hasInputToInputWeights,
+                        inputToInputWeights,
+                        inputToForgetWeights,
+                        inputToCellWeights,
+                        inputToOutputWeights,
+                        hasRecurrentToInputWeights,
+                        recurrentToInputWeights,
+                        recurrentToForgetWeights,
+                        recurrentToCellWeights,
+                        recurrentToOutputWeights,
+                        hasCellToInputWeights,
+                        cellToInputWeights,
+                        hasCellToForgetWeights,
+                        cellToForgetWeights,
+                        hasCellToOutputWeights,
+                        cellToOutputWeights,
+                        hasInputGateBias,
+                        inputGateBias,
+                        forgetGateBias,
+                        cellBias,
+                        outputGateBias,
+                        hasProjectionWeights,
+                        projectionWeights,
+                        hasProjectionBias,
+                        projectionBias,
+                        hasInputLayerNormWeights,
+                        inputLayerNormWeights,
+                        hasForgetLayerNormWeights,
+                        forgetLayerNormWeights,
+                        hasCellLayerNormWeights,
+                        cellLayerNormWeights,
+                        hasOutputLayerNormWeights,
+                        outputLayerNormWeights,
+                        inputValues,
+                        expectedOutputValues,
+                        activationFunction,
+                        clippingThresCell,
+                        clippingThresProj);
+}
+
+TEST_SUITE("LstmTest_CpuRefTests")
+{
+
+TEST_CASE ("LstmTest_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    LstmTest(backends);
+}
+
+} //End of TEST_SUITE("Convolution2dTest_CpuRef")
+
+TEST_SUITE("LstmTest_CpuAccTests")
+{
+
+TEST_CASE ("LstmTest_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    LstmTest(backends);
+}
+
+} //End of TEST_SUITE("Convolution2dTest_CpuAcc")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/LstmTestHelper.hpp b/delegate/test/LstmTestHelper.hpp
new file mode 100644
index 0000000..14776ca
--- /dev/null
+++ b/delegate/test/LstmTestHelper.hpp
@@ -0,0 +1,691 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+#include <tensorflow/lite/c/common.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename T>
+std::vector<char> CreateLstmTfLiteModel(tflite::TensorType tensorType,
+                                        int32_t batchSize,
+                                        int32_t inputSize,
+                                        int32_t outputSize,
+                                        int32_t numUnits,
+                                        bool hasInputToInputWeights,
+                                        const std::vector<T>& inputToInputWeights,
+                                        const std::vector<T>& inputToForgetWeights,
+                                        const std::vector<T>& inputToCellWeights,
+                                        const std::vector<T>& inputToOutputWeights,
+                                        bool hasRecurrentToInputWeights,
+                                        const std::vector<T>& recurrentToInputWeights,
+                                        const std::vector<T>& recurrentToForgetWeights,
+                                        const std::vector<T>& recurrentToCellWeights,
+                                        const std::vector<T>& recurrentToOutputWeights,
+                                        bool hasCellToInputWeights,
+                                        const std::vector<T>& cellToInputWeights,
+                                        bool hasCellToForgetWeights,
+                                        const std::vector<T>& cellToForgetWeights,
+                                        bool hasCellToOutputWeights,
+                                        const std::vector<T>& cellToOutputWeights,
+                                        bool hasInputGateBias,
+                                        const std::vector<T>& inputGateBias,
+                                        const std::vector<T>& forgetGateBias,
+                                        const std::vector<T>& cellBias,
+                                        const std::vector<T>& outputGateBias,
+                                        bool hasProjectionWeights,
+                                        const std::vector<T>& projectionWeights,
+                                        bool hasProjectionBias,
+                                        const std::vector<T>& projectionBias,
+                                        bool hasInputLayerNormWeights,
+                                        const std::vector<T>& inputLayerNormWeights,
+                                        bool hasForgetLayerNormWeights,
+                                        const std::vector<T>& forgetLayerNormWeights,
+                                        bool hasCellLayerNormWeights,
+                                        const std::vector<T>& cellLayerNormWeights,
+                                        bool hasOutputLayerNormWeights,
+                                        const std::vector<T>& outputLayerNormWeights,
+                                        tflite::ActivationFunctionType activationFunction,
+                                        float clippingThresCell,
+                                        float clippingThresProj,
+                                        float quantScale = 1.0f,
+                                        int quantOffset  = 0,
+                                        float outputQuantScale = 2.0f,
+                                        int outputQuantOffset  = 0)
+{
+    std::vector<int32_t> tensorInfo0 {};
+    std::vector<int32_t> tensorInfo4 {numUnits};
+    std::vector<int32_t> tensorInfo8 {numUnits, static_cast<int32_t>(2)};
+    std::vector<int32_t> tensorInfo16 {numUnits, static_cast<int32_t>(4)};
+
+    std::vector<int32_t> inputShape {batchSize, inputSize};
+    std::vector<int32_t> outputShape {batchSize, outputSize};
+
+    std::vector<int32_t> outputStateInDimensions{batchSize, outputSize};
+    std::vector<int32_t> cellStateInDimensions{batchSize, numUnits};
+
+    std::vector<int> operatorInputs;
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    std::vector<flatbuffers::Offset<Tensor>> tensors;
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto outputQuantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
+
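+    // Every tensor below is paired with its own buffer entry, so buffers.size() - 1 doubles as
+    // the buffer index passed to CreateTensor and the tensor index pushed into operatorInputs.
+    // Optional inputs that are not present are marked with kTfLiteOptionalTensor (-1) instead.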
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
+                                                                           inputShape.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("input_0"),
+                                   quantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    if (hasInputToInputWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToInputWeights.data()),
+                                                        sizeof(T) * inputToInputWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo8.data(),
+                                                                               tensorInfo8.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("inputToInputWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToForgetWeights.data()),
+                                                    sizeof(T) * inputToForgetWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo8.data(),
+                                                                           tensorInfo8.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("inputToForgetWeights"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToCellWeights.data()),
+                                                    sizeof(T) * inputToCellWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo8.data(),
+                                                                           tensorInfo8.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("inputToCellWeights"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToOutputWeights.data()),
+                                                    sizeof(T) * inputToOutputWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo8.data(),
+                                                                           tensorInfo8.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("inputToOutputWeights"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    if (hasRecurrentToInputWeights)
+    {
+        buffers.push_back(CreateBuffer(
+            flatBufferBuilder,
+            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(recurrentToInputWeights.data()),
+                                           sizeof(T) * recurrentToInputWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo16.data(),
+                                                                               tensorInfo16.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("recurrentToInputWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToForgetWeights.data()),
+                                                    sizeof(T) * recurrentToForgetWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo16.data(),
+                                                                           tensorInfo16.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("recurrentToForgetWeights"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToCellWeights.data()),
+                                                    sizeof(T) * recurrentToCellWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo16.data(),
+                                                                           tensorInfo16.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("recurrentToCellWeights"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToOutputWeights.data()),
+                                                    sizeof(T) * recurrentToOutputWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo16.data(),
+                                                                           tensorInfo16.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("recurrentToOutputWeights"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    if (hasCellToInputWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToInputWeights.data()),
+                                                        sizeof(T) * cellToInputWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("cellToInputWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasCellToForgetWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToForgetWeights.data()),
+                                                        sizeof(T) * cellToForgetWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("cellToForgetWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasCellToOutputWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToOutputWeights.data()),
+                                                        sizeof(T) * cellToOutputWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("cellToOutputWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasInputGateBias)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputGateBias.data()),
+                                                        sizeof(T) * inputGateBias.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("inputGateBias"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(forgetGateBias.data()),
+                                                    sizeof(T) * forgetGateBias.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                           tensorInfo4.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("forgetGateBias"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(cellBias.data()),
+                                                    sizeof(T) * cellBias.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                           tensorInfo4.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("cellBias"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(outputGateBias.data()),
+                                                    sizeof(T) * outputGateBias.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                           tensorInfo4.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("outputGateBias"),
+                                   outputQuantizationParameters));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    if (hasProjectionWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(projectionWeights.data()),
+                                                        sizeof(T) * projectionWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("projectionWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasProjectionBias)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(projectionBias.data()),
+                                                        sizeof(T) * projectionBias.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("projectionBias"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
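+    // The two state inputs are created with is_variable = true (the last CreateTensor argument),
+    // marking them as stateful TfLite tensors that persist between invocations.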
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(outputStateInDimensions.data(),
+                                                                           outputStateInDimensions.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("outputStateInInfo"),
+                                   outputQuantizationParameters,
+                                   true));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(cellStateInDimensions.data(),
+                                                                           cellStateInDimensions.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("cellStateInInfo"),
+                                   outputQuantizationParameters,
+                                   true));
+    operatorInputs.push_back(buffers.size() - 1);
+
+    if (hasInputLayerNormWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                                              reinterpret_cast<const uint8_t *>(inputLayerNormWeights.data()),
+                                              sizeof(T) * inputLayerNormWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("inputLayerNormWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasForgetLayerNormWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                                              reinterpret_cast<const uint8_t *>(forgetLayerNormWeights.data()),
+                                              sizeof(T) * forgetLayerNormWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("forgetLayerNormWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasCellLayerNormWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(cellLayerNormWeights.data()),
+                                                        sizeof(T) * cellLayerNormWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("cellLayerNormWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasOutputLayerNormWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t *>(outputLayerNormWeights.data()),
+                             sizeof(T) * outputLayerNormWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfo4.data(),
+                                                                               tensorInfo4.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("outputLayerNormWeights"),
+                                       outputQuantizationParameters));
+        operatorInputs.push_back(buffers.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    int outputBufferId = static_cast<int>(buffers.size());
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
+                                                                           outputShape.size()),
+                                   tensorType,
+                                   outputBufferId,
+                                   flatBufferBuilder.CreateString("output"),
+                                   outputQuantizationParameters));
+    std::vector<int> operatorOutputs;
+    operatorOutputs.push_back(buffers.size() - 1);
+
+    // create operator
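+    // LSTMOptions carries the fused activation function and the cell/projection clipping
+    // thresholds; a clipping threshold of 0.0 disables clipping.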
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_LSTMOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions =
+        CreateLSTMOptions(flatBufferBuilder,
+                          activationFunction,
+                          clippingThresCell,
+                          clippingThresProj).Union();
+
+    flatbuffers::Offset <Operator> lstmOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType, operatorBuiltinOptions);
+
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       flatBufferBuilder.CreateVector(&lstmOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: LSTM Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         tflite::BuiltinOperator_LSTM);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void LstmTestImpl(std::vector<armnn::BackendId>& backends,
+                  tflite::TensorType tensorType,
+                  int32_t batchSize,
+                  int32_t inputSize,
+                  int32_t outputSize,
+                  int32_t numUnits,
+                  bool hasInputToInputWeights,
+                  const std::vector<T>& inputToInputWeights,
+                  const std::vector<T>& inputToForgetWeights,
+                  const std::vector<T>& inputToCellWeights,
+                  const std::vector<T>& inputToOutputWeights,
+                  bool hasRecurrentToInputWeights,
+                  const std::vector<T>& recurrentToInputWeights,
+                  const std::vector<T>& recurrentToForgetWeights,
+                  const std::vector<T>& recurrentToCellWeights,
+                  const std::vector<T>& recurrentToOutputWeights,
+                  bool hasCellToInputWeights,
+                  const std::vector<T>& cellToInputWeights,
+                  bool hasCellToForgetWeights,
+                  const std::vector<T>& cellToForgetWeights,
+                  bool hasCellToOutputWeights,
+                  const std::vector<T>& cellToOutputWeights,
+                  bool hasInputGateBias,
+                  const std::vector<T>& inputGateBias,
+                  const std::vector<T>& forgetGateBias,
+                  const std::vector<T>& cellBias,
+                  const std::vector<T>& outputGateBias,
+                  bool hasProjectionWeights,
+                  const std::vector<T>& projectionWeights,
+                  bool hasProjectionBias,
+                  const std::vector<T>& projectionBias,
+                  bool hasInputLayerNormWeights,
+                  const std::vector<T>& inputLayerNormWeights,
+                  bool hasForgetLayerNormWeights,
+                  const std::vector<T>& forgetLayerNormWeights,
+                  bool hasCellLayerNormWeights,
+                  const std::vector<T>& cellLayerNormWeights,
+                  bool hasOutputLayerNormWeights,
+                  const std::vector<T>& outputLayerNormWeights,
+                  std::vector<T>& inputValues,
+                  std::vector<T>& expectedOutputValues,
+                  tflite::ActivationFunctionType activationFunction,
+                  float clippingThresCell,
+                  float clippingThresProj)
+{
+    using namespace tflite;
+
+    std::vector<char> modelBuffer = CreateLstmTfLiteModel(tensorType,
+                                                          batchSize,
+                                                          inputSize,
+                                                          outputSize,
+                                                          numUnits,
+                                                          hasInputToInputWeights,
+                                                          inputToInputWeights,
+                                                          inputToForgetWeights,
+                                                          inputToCellWeights,
+                                                          inputToOutputWeights,
+                                                          hasRecurrentToInputWeights,
+                                                          recurrentToInputWeights,
+                                                          recurrentToForgetWeights,
+                                                          recurrentToCellWeights,
+                                                          recurrentToOutputWeights,
+                                                          hasCellToInputWeights,
+                                                          cellToInputWeights,
+                                                          hasCellToForgetWeights,
+                                                          cellToForgetWeights,
+                                                          hasCellToOutputWeights,
+                                                          cellToOutputWeights,
+                                                          hasInputGateBias,
+                                                          inputGateBias,
+                                                          forgetGateBias,
+                                                          cellBias,
+                                                          outputGateBias,
+                                                          hasProjectionWeights,
+                                                          projectionWeights,
+                                                          hasProjectionBias,
+                                                          projectionBias,
+                                                          hasInputLayerNormWeights,
+                                                          inputLayerNormWeights,
+                                                          hasForgetLayerNormWeights,
+                                                          forgetLayerNormWeights,
+                                                          hasCellLayerNormWeights,
+                                                          cellLayerNormWeights,
+                                                          hasOutputLayerNormWeights,
+                                                          outputLayerNormWeights,
+                                                          activationFunction,
+                                                          clippingThresCell,
+                                                          clippingThresProj);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
+
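+    // Three-way check: both the reference TfLite run and the Arm NN delegate run are compared
+    // against the expected values, and then against each other.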
+    armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size());
+    armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData, expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/MirrorPadTest.cpp b/delegate/test/MirrorPadTest.cpp
new file mode 100644
index 0000000..14c4755
--- /dev/null
+++ b/delegate/test/MirrorPadTest.cpp
@@ -0,0 +1,341 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "PadTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void MirrorPadSymmetric2dTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 3, 3 };
+    std::vector<int32_t> outputShape { 7, 7 };
+    std::vector<int32_t> paddingShape { 2, 2 };
+
+    std::vector<float> inputValues =
+    {
+        1.0f, 2.0f, 3.0f,
+        4.0f, 5.0f, 6.0f,
+        7.0f, 8.0f, 9.0f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        5.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 5.0f,
+        2.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 2.0f,
+        2.0f, 1.0f, 1.0f, 2.0f, 3.0f, 3.0f, 2.0f,
+        5.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 5.0f,
+        8.0f, 7.0f, 7.0f, 8.0f, 9.0f, 9.0f, 8.0f,
+        8.0f, 7.0f, 7.0f, 8.0f, 9.0f, 9.0f, 8.0f,
+        5.0f, 4.0f, 4.0f, 5.0f, 6.0f, 6.0f, 5.0f
+    };
+
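+    // The MIRROR_PAD padding tensor has shape [rank, 2]; paddingDim flattens it as
+    // { before_dim0, after_dim0, before_dim1, after_dim1 }, i.e. two rows and two
+    // columns added on every side, growing 3x3 to 7x7.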
+    std::vector<int32_t> paddingDim = { 2, 2, 2, 2 };
+
+    PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   0,    // Padding value - Not used in these tests.
+                   1.0f, // Scale
+                   0,    // Offset
+                   tflite::MirrorPadMode_SYMMETRIC);
+}
+
+void MirrorPadReflect2dTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 3, 3 };
+    std::vector<int32_t> outputShape { 7, 7 };
+    std::vector<int32_t> paddingShape { 2, 2 };
+
+    std::vector<float> inputValues =
+    {
+        1.0f, 2.0f, 3.0f,
+        4.0f, 5.0f, 6.0f,
+        7.0f, 8.0f, 9.0f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        9.0f, 8.0f, 7.0f, 8.0f, 9.0f, 8.0f, 7.0f,
+        6.0f, 5.0f, 4.0f, 5.0f, 6.0f, 5.0f, 4.0f,
+        3.0f, 2.0f, 1.0f, 2.0f, 3.0f, 2.0f, 1.0f,
+        6.0f, 5.0f, 4.0f, 5.0f, 6.0f, 5.0f, 4.0f,
+        9.0f, 8.0f, 7.0f, 8.0f, 9.0f, 8.0f, 7.0f,
+        6.0f, 5.0f, 4.0f, 5.0f, 6.0f, 5.0f, 4.0f,
+        3.0f, 2.0f, 1.0f, 2.0f, 3.0f, 2.0f, 1.0f
+    };
+
+    std::vector<int32_t> paddingDim = { 2, 2, 2, 2 };
+
+    PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   0,    // Padding value - Not used in these tests.
+                   1.0f, // Scale
+                   0,    // Offset
+                   tflite::MirrorPadMode_REFLECT);
+}
+
+void MirrorPadSymmetric3dTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 4, 4, 4 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<float> inputValues =
+    {
+        // Channel 0, Height (2) x Width (2)
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+
+        // Channel 1, Height (2) x Width (2)
+        5.0f, 6.0f,
+        7.0f, 8.0f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        1.0f, 1.0f, 2.0f, 2.0f,
+        1.0f, 1.0f, 2.0f, 2.0f,
+        3.0f, 3.0f, 4.0f, 4.0f,
+        3.0f, 3.0f, 4.0f, 4.0f,
+
+        1.0f, 1.0f, 2.0f, 2.0f,
+        1.0f, 1.0f, 2.0f, 2.0f,
+        3.0f, 3.0f, 4.0f, 4.0f,
+        3.0f, 3.0f, 4.0f, 4.0f,
+
+        5.0f, 5.0f, 6.0f, 6.0f,
+        5.0f, 5.0f, 6.0f, 6.0f,
+        7.0f, 7.0f, 8.0f, 8.0f,
+        7.0f, 7.0f, 8.0f, 8.0f,
+
+        5.0f, 5.0f, 6.0f, 6.0f,
+        5.0f, 5.0f, 6.0f, 6.0f,
+        7.0f, 7.0f, 8.0f, 8.0f,
+        7.0f, 7.0f, 8.0f, 8.0f
+    };
+
+    std::vector<int32_t> paddingDim = { 1, 1, 1, 1, 1, 1 };
+
+    PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   0,    // Padding value - Not used in these tests.
+                   1.0f, // Scale
+                   0,    // Offset
+                   tflite::MirrorPadMode_SYMMETRIC);
+}
+
+void MirrorPadReflect3dTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 4, 4, 4 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<float> inputValues =
+    {
+        // Channel 0, Height (2) x Width (2)
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+
+        // Channel 1, Height (2) x Width (2)
+        5.0f, 6.0f,
+        7.0f, 8.0f
+    };
+
+    std::vector<float> expectedOutputValues =
+    {
+        8.0f, 7.0f, 8.0f, 7.0f,
+        6.0f, 5.0f, 6.0f, 5.0f,
+        8.0f, 7.0f, 8.0f, 7.0f,
+        6.0f, 5.0f, 6.0f, 5.0f,
+
+        4.0f, 3.0f, 4.0f, 3.0f,
+        2.0f, 1.0f, 2.0f, 1.0f,
+        4.0f, 3.0f, 4.0f, 3.0f,
+        2.0f, 1.0f, 2.0f, 1.0f,
+
+        8.0f, 7.0f, 8.0f, 7.0f,
+        6.0f, 5.0f, 6.0f, 5.0f,
+        8.0f, 7.0f, 8.0f, 7.0f,
+        6.0f, 5.0f, 6.0f, 5.0f,
+
+        4.0f, 3.0f, 4.0f, 3.0f,
+        2.0f, 1.0f, 2.0f, 1.0f,
+        4.0f, 3.0f, 4.0f, 3.0f,
+        2.0f, 1.0f, 2.0f, 1.0f
+    };
+
+    std::vector<int32_t> paddingDim = { 1, 1, 1, 1, 1, 1 };
+
+    PadTest<float>(tflite::BuiltinOperator_MIRROR_PAD,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   0,    // Padding value - Not used in these tests.
+                   1.0f, // Scale
+                   0,    // Offset
+                   tflite::MirrorPadMode_REFLECT);
+}
+
+void MirrorPadSymmetricUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 3, 3 };
+    std::vector<int32_t> outputShape { 5, 7 };
+    std::vector<int32_t> paddingShape { 2, 2 };
+
+    std::vector<uint8_t> inputValues =
+    {
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9
+    };
+
+    std::vector<uint8_t> expectedOutputValues =
+    {
+        2, 1, 1, 2, 3, 3, 2,
+        2, 1, 1, 2, 3, 3, 2,
+        5, 4, 4, 5, 6, 6, 5,
+        8, 7, 7, 8, 9, 9, 8,
+        8, 7, 7, 8, 9, 9, 8,
+    };
+
+    std::vector<int32_t> paddingDim = { 1, 1, 2, 2 };
+
+    PadTest<uint8_t>(tflite::BuiltinOperator_MIRROR_PAD,
+                     ::tflite::TensorType_UINT8,
+                     backends,
+                     inputShape,
+                     paddingShape,
+                     outputShape,
+                     inputValues,
+                     paddingDim,
+                     expectedOutputValues,
+                     0,    // Padding value - Not used in these tests.
+                     1.0f, // Scale
+                     1,    // Offset
+                     tflite::MirrorPadMode_SYMMETRIC);
+}
+
+void MirrorPadReflectInt8Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 3, 3 };
+    std::vector<int32_t> outputShape { 7, 5 };
+    std::vector<int32_t> paddingShape { 2, 2 };
+
+    std::vector<int8_t> inputValues =
+    {
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9
+    };
+
+    std::vector<int8_t> expectedOutputValues =
+    {
+        8, 7, 8, 9, 8,
+        5, 4, 5, 6, 5,
+        2, 1, 2, 3, 2,
+        5, 4, 5, 6, 5,
+        8, 7, 8, 9, 8,
+        5, 4, 5, 6, 5,
+        2, 1, 2, 3, 2
+    };
+
+    std::vector<int32_t> paddingDim = { 2, 2, 1, 1 };
+
+    PadTest<int8_t>(tflite::BuiltinOperator_MIRROR_PAD,
+                    ::tflite::TensorType_INT8,
+                    backends,
+                    inputShape,
+                    paddingShape,
+                    outputShape,
+                    inputValues,
+                    paddingDim,
+                    expectedOutputValues,
+                    0,    // Padding value - Not used in these tests.
+                    1.0f, // Scale
+                    1,    // Offset
+                    tflite::MirrorPadMode_REFLECT);
+}
+
+TEST_SUITE("MirrorPad_CpuRefTests")
+{
+
+TEST_CASE ("MirrorPadSymmetric2d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MirrorPadSymmetric2dTest(backends);
+}
+
+TEST_CASE ("MirrorPadReflect2d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MirrorPadReflect2dTest(backends);
+}
+
+TEST_CASE ("MirrorPadSymmetric3d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MirrorPadSymmetric3dTest(backends);
+}
+
+TEST_CASE ("MirrorPadReflect3d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MirrorPadReflect3dTest(backends);
+}
+
+TEST_CASE ("MirrorPadSymmetricUint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MirrorPadSymmetricUint8Test(backends);
+}
+
+TEST_CASE ("MirrorPadSymmetricInt8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MirrorPadReflectInt8Test(backends);
+}
+
+} // TEST_SUITE("MirrorPad_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/NeonDelegateTests_NDK_Issue.cpp b/delegate/test/NeonDelegateTests_NDK_Issue.cpp
new file mode 100644
index 0000000..5dad598
--- /dev/null
+++ b/delegate/test/NeonDelegateTests_NDK_Issue.cpp
@@ -0,0 +1,63 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NormalizationTestHelper.hpp"
+#include "SoftmaxTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+// There's a known Android NDK bug which causes this subset of Neon tests to
+// fail. We exclude these tests if we're doing a debug build and the NDK
+// version is less than r21.
+// The exclusion takes place in test/CMakeLists.txt.
+// https://github.com/android/ndk/issues/1135
+
+TEST_SUITE ("Softmax_CpuAccTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
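+    // Reference values computed as softmax(x * beta) with beta = 1 over the fixed input
+    // defined in SoftmaxTestHelper.hpp.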
+    std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+                                         0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    std::vector<float> expectedOutput = {
+        0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
+        0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    std::vector<float> expectedOutput =
+        {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+         -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+    SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_CpuAccTests")
+
+TEST_SUITE("L2Normalization_CpuAccTests")
+{
+
+TEST_CASE ("L2NormalizationFp32Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    L2NormalizationTest(backends);
+}
+} // TEST_SUITE("L2NormalizationFp32Test_CpuAcc_Test")
+}
\ No newline at end of file
diff --git a/delegate/test/NormalizationTest.cpp b/delegate/test/NormalizationTest.cpp
new file mode 100644
index 0000000..b3a6f4b
--- /dev/null
+++ b/delegate/test/NormalizationTest.cpp
@@ -0,0 +1,72 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NormalizationTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+TEST_SUITE("L2Normalization_CpuRefTests")
+{
+
+TEST_CASE ("L2NormalizationFp32Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    L2NormalizationTest(backends);
+}
+
+} // TEST_SUITE("L2Normalization_CpuRefTests")
+
+TEST_SUITE("L2Normalization_GpuAccTests")
+{
+
+TEST_CASE ("L2NormalizationFp32Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    L2NormalizationTest(backends);
+}
+
+} // TEST_SUITE("L2Normalization_GpuAccTests")
+
+TEST_SUITE("LocalResponseNormalization_CpuRefTests")
+{
+
+TEST_CASE ("LocalResponseNormalizationTest_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    LocalResponseNormalizationTest(backends, 3, 1.f, 1.f, 1.f);
+}
+
+} // TEST_SUITE("LocalResponseNormalization_CpuRefTests")
+
+TEST_SUITE("LocalResponseNormalization_CpuAccTests")
+{
+
+TEST_CASE ("LocalResponseNormalizationTest_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    LocalResponseNormalizationTest(backends, 3, 1.f, 1.f, 1.f);
+}
+
+} // TEST_SUITE("LocalResponseNormalization_CpuAccTests")
+
+TEST_SUITE("LocalResponseNormalization_GpuAccTests")
+{
+
+TEST_CASE ("LocalResponseNormalizationTest_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    LocalResponseNormalizationTest(backends, 3, 1.f, 1.f, 1.f);
+}
+
+} // TEST_SUITE("LocalResponseNormalization_GpuAccTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/NormalizationTestHelper.hpp b/delegate/test/NormalizationTestHelper.hpp
new file mode 100644
index 0000000..eafdf84
--- /dev/null
+++ b/delegate/test/NormalizationTestHelper.hpp
@@ -0,0 +1,263 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateNormalizationTfLiteModel(tflite::BuiltinOperator normalizationOperatorCode,
+                                                 tflite::TensorType tensorType,
+                                                 const std::vector<int32_t>& inputTensorShape,
+                                                 const std::vector<int32_t>& outputTensorShape,
+                                                 int32_t radius,
+                                                 float bias,
+                                                 float alpha,
+                                                 float beta,
+                                                 float quantScale = 1.0f,
+                                                 int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto inputTensor = CreateTensor(flatBufferBuilder,
+                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                            inputTensorShape.size()),
+                                    tensorType,
+                                    1,
+                                    flatBufferBuilder.CreateString("input"),
+                                    quantizationParameters);
+
+    auto outputTensor = CreateTensor(flatBufferBuilder,
+                                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                             outputTensorShape.size()),
+                                     tensorType,
+                                     2,
+                                     flatBufferBuilder.CreateString("output"),
+                                     quantizationParameters);
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, outputTensor };
+
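+    // TfLite reserves buffer 0 as an empty sentinel; the input and output tensors above point at
+    // buffers 1 and 2, which are also left empty since neither tensor holds constant data.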
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    std::vector<int32_t> operatorInputs = { 0 };
+    std::vector<int> subgraphInputs = { 0 };
+
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_L2NormOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateL2NormOptions(flatBufferBuilder,
+                                                                           tflite::ActivationFunctionType_NONE).Union();
+
+    if (normalizationOperatorCode == tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION)
+    {
+        operatorBuiltinOptionsType = BuiltinOptions_LocalResponseNormalizationOptions;
+        operatorBuiltinOptions =
+            CreateLocalResponseNormalizationOptions(flatBufferBuilder, radius, bias, alpha, beta).Union();
+    }
+
+    // create operator
+    const std::vector<int32_t> operatorOutputs{ 1 };
+    flatbuffers::Offset <Operator> normalizationOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphOutputs{ 1 };
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&normalizationOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Normalization Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         normalizationOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void NormalizationTest(tflite::BuiltinOperator normalizationOperatorCode,
+                       tflite::TensorType tensorType,
+                       const std::vector<armnn::BackendId>& backends,
+                       const std::vector<int32_t>& inputShape,
+                       std::vector<int32_t>& outputShape,
+                       std::vector<T>& inputValues,
+                       std::vector<T>& expectedOutputValues,
+                       int32_t radius = 0,
+                       float bias = 0.f,
+                       float alpha = 0.f,
+                       float beta = 0.f,
+                       float quantScale = 1.0f,
+                       int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateNormalizationTfLiteModel(normalizationOperatorCode,
+                                                                   tensorType,
+                                                                   inputShape,
+                                                                   outputShape,
+                                                                   radius,
+                                                                   bias,
+                                                                   alpha,
+                                                                   beta,
+                                                                   quantScale,
+                                                                   quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+          (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+          (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+}
+
+void L2NormalizationTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 1, 1, 1, 10 };
+    std::vector<int32_t> outputShape { 1, 1, 1, 10 };
+
+    std::vector<float> inputValues
+    {
+        1.0f,
+        2.0f,
+        3.0f,
+        4.0f,
+        5.0f,
+        6.0f,
+        7.0f,
+        8.0f,
+        9.0f,
+        10.0f
+    };
+
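+    // 1 / L2-norm of the input: 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385).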
+    const float approxInvL2Norm = 0.050964719f;
+    std::vector<float> expectedOutputValues
+    {
+        1.0f  * approxInvL2Norm,
+        2.0f  * approxInvL2Norm,
+        3.0f  * approxInvL2Norm,
+        4.0f  * approxInvL2Norm,
+        5.0f  * approxInvL2Norm,
+        6.0f  * approxInvL2Norm,
+        7.0f  * approxInvL2Norm,
+        8.0f  * approxInvL2Norm,
+        9.0f  * approxInvL2Norm,
+        10.0f * approxInvL2Norm
+    };
+
+    NormalizationTest<float>(tflite::BuiltinOperator_L2_NORMALIZATION,
+                             ::tflite::TensorType_FLOAT32,
+                             backends,
+                             inputShape,
+                             outputShape,
+                             inputValues,
+                             expectedOutputValues);
+}
+
+void LocalResponseNormalizationTest(std::vector<armnn::BackendId>& backends,
+                                    int32_t radius,
+                                    float bias,
+                                    float alpha,
+                                    float beta)
+{
+    // Set input data
+    std::vector<int32_t> inputShape  { 2, 2, 2, 1 };
+    std::vector<int32_t> outputShape { 2, 2, 2, 1 };
+
+    std::vector<float> inputValues
+    {
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        5.0f, 6.0f,
+        7.0f, 8.0f
+    };
+
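+    // With a channel depth of 1 the normalization window only covers each element
+    // itself, so the expected output is x / (bias + alpha * x^2)^beta for each
+    // input x; the values below match bias = alpha = beta = 1, which the test
+    // cases are assumed to pass.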
+    std::vector<float> expectedOutputValues
+    {
+        0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
+        0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f
+    };
+
+    NormalizationTest<float>(tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
+                             ::tflite::TensorType_FLOAT32,
+                             backends,
+                             inputShape,
+                             outputShape,
+                             inputValues,
+                             expectedOutputValues,
+                             radius,
+                             bias,
+                             alpha,
+                             beta);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/PackTest.cpp b/delegate/test/PackTest.cpp
new file mode 100644
index 0000000..1e7eb69
--- /dev/null
+++ b/delegate/test/PackTest.cpp
@@ -0,0 +1,516 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "PackTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+template <typename T>
+void PackFp32Axis0Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 3, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 2, 3, 2, 3 };
+
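+    // PACK stacks the two { 3, 2, 3 } inputs along axis 0, adding a new leading
+    // dimension of size 2 (the number of inputs).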
+    std::vector<std::vector<T>> inputValues;
+    inputValues.push_back(
+    {
+        1, 2, 3,
+        4, 5, 6,
+
+        7, 8, 9,
+        10, 11, 12,
+
+        13, 14, 15,
+        16, 17, 18
+    });
+
+    inputValues.push_back(
+    {
+        19, 20, 21,
+        22, 23, 24,
+
+        25, 26, 27,
+        28, 29, 30,
+
+        31, 32, 33,
+        34, 35, 36
+    });
+
+    std::vector<T> expectedOutputValues =
+    {
+        1, 2, 3,
+        4, 5, 6,
+
+        7, 8, 9,
+        10, 11, 12,
+
+        13, 14, 15,
+        16, 17, 18,
+
+
+        19, 20, 21,
+        22, 23, 24,
+
+        25, 26, 27,
+        28, 29, 30,
+
+        31, 32, 33,
+        34, 35, 36
+    };
+
+    PackTest<T>(tflite::BuiltinOperator_PACK,
+                tensorType,
+                backends,
+                inputShape,
+                expectedOutputShape,
+                inputValues,
+                expectedOutputValues,
+                0);
+}
+
+template <typename T>
+void PackFp32Axis1Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 3, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 3, 2, 2, 3 };
+
+    std::vector<std::vector<T>> inputValues;
+    inputValues.push_back(
+    {
+        1, 2, 3,
+        4, 5, 6,
+
+        7, 8, 9,
+        10, 11, 12,
+
+        13, 14, 15,
+        16, 17, 18
+    });
+
+    inputValues.push_back(
+    {
+        19, 20, 21,
+        22, 23, 24,
+
+        25, 26, 27,
+        28, 29, 30,
+
+        31, 32, 33,
+        34, 35, 36
+    });
+
+    std::vector<T> expectedOutputValues =
+    {
+        1, 2, 3,
+        4, 5, 6,
+
+        19, 20, 21,
+        22, 23, 24,
+
+
+        7, 8, 9,
+        10, 11, 12,
+
+        25, 26, 27,
+        28, 29, 30,
+
+
+        13, 14, 15,
+        16, 17, 18,
+
+        31, 32, 33,
+        34, 35, 36
+    };
+
+    PackTest<T>(tflite::BuiltinOperator_PACK,
+                tensorType,
+                backends,
+                inputShape,
+                expectedOutputShape,
+                inputValues,
+                expectedOutputValues,
+                1);
+}
+
+template <typename T>
+void PackFp32Axis2Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 3, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 3, 2, 2, 3 };
+
+    std::vector<std::vector<T>> inputValues;
+    inputValues.push_back(
+    {
+        1, 2, 3,
+        4, 5, 6,
+
+        7, 8, 9,
+        10, 11, 12,
+
+        13, 14, 15,
+        16, 17, 18
+    });
+
+    inputValues.push_back(
+    {
+        19, 20, 21,
+        22, 23, 24,
+
+        25, 26, 27,
+        28, 29, 30,
+
+        31, 32, 33,
+        34, 35, 36
+    });
+
+    std::vector<T> expectedOutputValues =
+    {
+        1, 2, 3,
+        19, 20, 21,
+
+        4, 5, 6,
+        22, 23, 24,
+
+        7, 8, 9,
+        25, 26, 27,
+
+        10, 11, 12,
+        28, 29, 30,
+
+        13, 14, 15,
+        31, 32, 33,
+
+        16, 17, 18,
+        34, 35, 36
+    };
+
+    PackTest<T>(tflite::BuiltinOperator_PACK,
+                tensorType,
+                backends,
+                inputShape,
+                expectedOutputShape,
+                inputValues,
+                expectedOutputValues,
+                2);
+}
+
+template <typename T>
+void PackFp32Axis3Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 3, 2, 3 };
+    std::vector<int32_t> expectedOutputShape { 3, 2, 3, 2 };
+
+    std::vector<std::vector<T>> inputValues;
+    inputValues.push_back(
+    {
+        1, 2, 3,
+        4, 5, 6,
+
+        7, 8, 9,
+        10, 11, 12,
+
+        13, 14, 15,
+        16, 17, 18
+    });
+
+    inputValues.push_back(
+    {
+        19, 20, 21,
+        22, 23, 24,
+
+        25, 26, 27,
+        28, 29, 30,
+
+        31, 32, 33,
+        34, 35, 36
+    });
+
+    std::vector<T> expectedOutputValues =
+    {
+        1, 19,
+        2, 20,
+        3, 21,
+
+        4, 22,
+        5, 23,
+        6, 24,
+
+
+        7, 25,
+        8, 26,
+        9, 27,
+
+        10, 28,
+        11, 29,
+        12, 30,
+
+
+        13, 31,
+        14, 32,
+        15, 33,
+
+        16, 34,
+        17, 35,
+        18, 36
+    };
+
+    PackTest<T>(tflite::BuiltinOperator_PACK,
+                tensorType,
+                backends,
+                inputShape,
+                expectedOutputShape,
+                inputValues,
+                expectedOutputValues,
+                3);
+}
+
+template <typename T>
+void PackFp32Inputs3Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape { 3, 3 };
+    std::vector<int32_t> expectedOutputShape { 3, 3, 3 };
+
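+    // Packing three { 3, 3 } inputs along axis 1 interleaves one row from each
+    // input in turn, producing a { 3, 3, 3 } output.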
+    std::vector<std::vector<T>> inputValues;
+    inputValues.push_back(
+    {
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9
+    });
+
+    inputValues.push_back(
+    {
+        10, 11, 12,
+        13, 14, 15,
+        16, 17, 18
+    });
+
+    inputValues.push_back(
+    {
+        19, 20, 21,
+        22, 23, 24,
+        25, 26, 27
+    });
+
+    std::vector<T> expectedOutputValues =
+    {
+        1, 2, 3,
+        10, 11, 12,
+        19, 20, 21,
+
+        4, 5, 6,
+        13, 14, 15,
+        22, 23, 24,
+
+        7, 8, 9,
+        16, 17, 18,
+        25, 26, 27
+    };
+
+    PackTest<T>(tflite::BuiltinOperator_PACK,
+                tensorType,
+                backends,
+                inputShape,
+                expectedOutputShape,
+                inputValues,
+                expectedOutputValues,
+                1);
+}
+
+TEST_SUITE("Pack_CpuAccTests")
+{
+
+// Fp32
+TEST_CASE ("Pack_Fp32_Axis0_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    PackFp32Axis0Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Pack_Fp32_Axis1_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    PackFp32Axis1Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Pack_Fp32_Axis2_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    PackFp32Axis2Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Pack_Fp32_Axis3_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    PackFp32Axis3Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Pack_Fp32_Inputs3_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    PackFp32Inputs3Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+// Uint8
+TEST_CASE ("Pack_Uint8_Axis0_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    PackFp32Axis0Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+TEST_CASE ("Pack_Uint8_Inputs3_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    PackFp32Inputs3Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+// Int8
+TEST_CASE ("Pack_Int8_Axis0_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    PackFp32Axis0Test<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+TEST_CASE ("Pack_Int8_Inputs3_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    PackFp32Inputs3Test<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+} // TEST_SUITE("Pack_CpuAccTests")
+
+TEST_SUITE("Pack_GpuAccTests")
+{
+
+// Fp32
+TEST_CASE ("Pack_Fp32_Axis0_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    PackFp32Axis0Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Pack_Fp32_Axis1_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    PackFp32Axis1Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Pack_Fp32_Axis2_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    PackFp32Axis2Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Pack_Fp32_Axis3_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    PackFp32Axis3Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Pack_Fp32_Inputs3_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    PackFp32Inputs3Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+// Uint8
+TEST_CASE ("Pack_Uint8_Axis0_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    PackFp32Axis0Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+TEST_CASE ("Pack_Uint8_Inputs3_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    PackFp32Inputs3Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+// Int8
+TEST_CASE ("Pack_Int8_Axis0_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    PackFp32Axis0Test<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+TEST_CASE ("Pack_Int8_Inputs3_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    PackFp32Inputs3Test<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+} // TEST_SUITE("Pack_GpuAccTests")
+
+TEST_SUITE("Pack_CpuRefTests")
+{
+
+// Fp32
+TEST_CASE ("Pack_Fp32_Axis0_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    PackFp32Axis0Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Pack_Fp32_Axis1_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    PackFp32Axis1Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Pack_Fp32_Axis2_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    PackFp32Axis2Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Pack_Fp32_Axis3_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    PackFp32Axis3Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Pack_Fp32_Inputs3_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    PackFp32Inputs3Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+// Uint8
+TEST_CASE ("Pack_Uint8_Axis0_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    PackFp32Axis0Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+TEST_CASE ("Pack_Uint8_Inputs3_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    PackFp32Inputs3Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+// Int8
+TEST_CASE ("Pack_Int8_Axis0_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    PackFp32Axis0Test<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+TEST_CASE ("Pack_Int8_Inputs3_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    PackFp32Inputs3Test<int8_t>(tflite::TensorType_INT8, backends);
+}
+
+} // TEST_SUITE("Pack_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/PackTestHelper.hpp b/delegate/test/PackTestHelper.hpp
new file mode 100644
index 0000000..0fd2f19
--- /dev/null
+++ b/delegate/test/PackTestHelper.hpp
@@ -0,0 +1,186 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+#include <string>
+
+namespace
+{
+
+std::vector<char> CreatePackTfLiteModel(tflite::BuiltinOperator packOperatorCode,
+                                        tflite::TensorType tensorType,
+                                        std::vector<int32_t>& inputTensorShape,
+                                        const std::vector <int32_t>& outputTensorShape,
+                                        const int32_t inputTensorNum,
+                                        unsigned int axis = 0,
+                                        float quantScale = 1.0f,
+                                        int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
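+    // Buffer 0 is the empty sentinel buffer the TfLite schema reserves; buffer 1
+    // is shared by all input tensors, whose data is supplied at runtime rather
+    // than stored in the flatbuffer.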
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
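+    // Tensor indices 0..inputTensorNum-1 are the pack inputs; the packed output
+    // tensor lives at index inputTensorNum.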
+    std::vector<int32_t> operatorInputs{};
+    const std::vector<int32_t> operatorOutputs{inputTensorNum};
+    std::vector<int> subgraphInputs{};
+    const std::vector<int> subgraphOutputs{inputTensorNum};
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors(inputTensorNum + 1);
+    for (int i = 0; i < inputTensorNum; ++i)
+    {
+        tensors[i] = CreateTensor(flatBufferBuilder,
+                                  flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                          inputTensorShape.size()),
+                                  tensorType,
+                                  1,
+                                  flatBufferBuilder.CreateString("input" + std::to_string(i)),
+                                  quantizationParameters);
+
+        // Register this tensor index as both an operator and a subgraph input.
+        operatorInputs.push_back(i);
+        subgraphInputs.push_back(i);
+    }
+
+    // Create output tensor
+    tensors[inputTensorNum] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // Create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_PackOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions =
+            CreatePackOptions(flatBufferBuilder, inputTensorNum, axis).Union();
+
+    flatbuffers::Offset <Operator> packOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&packOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: Pack Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, packOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void PackTest(tflite::BuiltinOperator packOperatorCode,
+              tflite::TensorType tensorType,
+              std::vector<armnn::BackendId>& backends,
+              std::vector<int32_t>& inputShape,
+              std::vector<int32_t>& expectedOutputShape,
+              std::vector<std::vector<T>>& inputValues,
+              std::vector<T>& expectedOutputValues,
+              unsigned int axis = 0,
+              float quantScale = 1.0f,
+              int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreatePackTfLiteModel(packOperatorCode,
+                                                          tensorType,
+                                                          inputShape,
+                                                          expectedOutputShape,
+                                                          inputValues.size(),
+                                                          axis,
+                                                          quantScale,
+                                                          quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data for all input tensors.
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        // Get single input tensor and assign to interpreters.
+        auto inputTensorValues = inputValues[i];
+        armnnDelegate::FillInput<T>(tfLiteInterpreter, i, inputTensorValues);
+        armnnDelegate::FillInput<T>(armnnDelegateInterpreter, i, inputTensorValues);
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                        armnnDelegateInterpreter,
+                                        expectedOutputShape,
+                                        expectedOutputValues);
+
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/PadTest.cpp b/delegate/test/PadTest.cpp
new file mode 100644
index 0000000..be54ede
--- /dev/null
+++ b/delegate/test/PadTest.cpp
@@ -0,0 +1,606 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "PadTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void Pad2dTest(std::vector<armnn::BackendId>& backends,
+               tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+               float pad = 0.0f)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 3, 5, 6 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<float> inputValues = { 0.0f,  4.0f,
+                                       2.0f, -5.0f,
+                                       6.0f,  1.0f,
+                                       5.0f, -2.0f };
+
+    std::vector<float> expectedOutputValues = { pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, 0.0f, 4.0f, pad, pad,
+                                                pad, pad, 2.0f, -5.0f, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, 6.0f, 1.0f, pad, pad,
+                                                pad, pad, 5.0f, -2.0f, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad };
+
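+    // Padding amounts as (before, after) pairs per dimension: 0/1, 2/1 and 2/2,
+    // which grows the { 2, 2, 2 } input to the { 3, 5, 6 } output.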
+    std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
+
+    PadTest<float>(padOperatorCode,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   pad);
+}
+
+void Pad3dTest(std::vector<armnn::BackendId>& backends,
+               tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+               float pad = 0.0f)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 3, 5, 6 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<float> inputValues = { 0.0f, 4.0f,
+                                       2.0f, 5.0f,
+                                       6.0f, 1.0f,
+                                       5.0f, 2.0f };
+
+    std::vector<float> expectedOutputValues = { pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, 0.0f, 4.0f, pad, pad,
+                                                pad, pad, 2.0f, 5.0f, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, 6.0f, 1.0f, pad, pad,
+                                                pad, pad, 5.0f, 2.0f, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad,
+                                                pad, pad, pad, pad, pad, pad };
+
+    std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
+
+    PadTest<float>(padOperatorCode,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   pad);
+}
+
+void Pad4dTest(std::vector<armnn::BackendId>& backends,
+               tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+               float pad = 0.0f)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 3, 2 };
+    std::vector<int32_t> outputShape { 4, 5, 7, 4 };
+    std::vector<int32_t> paddingShape { 4, 2 };
+
+    std::vector<float> inputValues = { 0.0f,  1.0f,
+                                       2.0f,  3.0f,
+                                       4.0f,  5.0f,
+
+                                       6.0f,  7.0f,
+                                       8.0f,  9.0f,
+                                       10.0f, 11.0f,
+
+                                       12.0f, 13.0f,
+                                       14.0f, 15.0f,
+                                       16.0f, 17.0f,
+
+                                       18.0f, 19.0f,
+                                       20.0f, 21.0f,
+                                       22.0f, 23.0f };
+
+    std::vector<float> expectedOutputValues = { pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, 0.0f, 1.0f, pad,
+                                                pad, 2.0f, 3.0f, pad,
+                                                pad, 4.0f, 5.0f, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, 6.0f, 7.0f, pad,
+                                                pad, 8.0f, 9.0f, pad,
+                                                pad, 10.0f, 11.0f, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, 12.0f, 13.0f, pad,
+                                                pad, 14.0f, 15.0f, pad,
+                                                pad, 16.0f, 17.0f, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, 18.0f, 19.0f, pad,
+                                                pad, 20.0f, 21.0f, pad,
+                                                pad, 22.0f, 23.0f, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad,
+                                                pad, pad, pad, pad };
+
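+    // (before, after) pairs per dimension: 1/1, 2/1, 3/1 and 1/1, growing
+    // { 2, 2, 3, 2 } to { 4, 5, 7, 4 }.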
+    std::vector<int32_t> paddingDim = { 1, 1, 2, 1, 3, 1, 1, 1 };
+
+    PadTest<float>(padOperatorCode,
+                   ::tflite::TensorType_FLOAT32,
+                   backends,
+                   inputShape,
+                   paddingShape,
+                   outputShape,
+                   inputValues,
+                   paddingDim,
+                   expectedOutputValues,
+                   pad);
+}
+
+void PadInt8Test(std::vector<armnn::BackendId>& backends,
+                 tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+                 int8_t paddingValue = 0,
+                 int8_t p = 3,
+                 float quantizationScale = -2.0f,
+                 int32_t quantizationOffset = 3)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 3, 5, 6 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<int8_t> inputValues = { 0,  4,
+                                        2, -5,
+                                        6,  1,
+                                        5, -2 };
+
+    std::vector<int8_t> expectedOutputValues = { p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, 0, 4, p, p,
+                                                 p, p, 2, -5, p, p,
+                                                 p, p, p, p, p, p,
+
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, 6, 1, p, p,
+                                                 p, p, 5, -2, p, p,
+                                                 p, p, p, p, p, p,
+
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p,
+                                                 p, p, p, p, p, p };
+
+    std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
+
+    PadTest<int8_t>(padOperatorCode,
+                    ::tflite::TensorType_INT8,
+                    backends,
+                    inputShape,
+                    paddingShape,
+                    outputShape,
+                    inputValues,
+                    paddingDim,
+                    expectedOutputValues,
+                    paddingValue,
+                    quantizationScale,
+                    quantizationOffset);
+}
+
+void PadUint8Test(std::vector<armnn::BackendId>& backends,
+                  tflite::BuiltinOperator padOperatorCode = tflite::BuiltinOperator_PAD,
+                  uint8_t paddingValue = 0,
+                  uint8_t p = 3,
+                  float quantizationScale = -2.0f,
+                  int32_t quantizationOffset = 3)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 2, 2 };
+    std::vector<int32_t> outputShape { 3, 5, 6 };
+    std::vector<int32_t> paddingShape { 3, 2 };
+
+    std::vector<uint8_t> inputValues = { 0, 4,
+                                         2, 5,
+                                         6, 1,
+                                         5, 2 };
+
+    std::vector<uint8_t> expectedOutputValues = { p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, 0, 4, p, p,
+                                                  p, p, 2, 5, p, p,
+                                                  p, p, p, p, p, p,
+
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, 6, 1, p, p,
+                                                  p, p, 5, 2, p, p,
+                                                  p, p, p, p, p, p,
+
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p,
+                                                  p, p, p, p, p, p };
+
+    std::vector<int32_t> paddingDim = { 0, 1, 2, 1, 2, 2 };
+
+    PadTest<uint8_t>(padOperatorCode,
+                     ::tflite::TensorType_UINT8,
+                     backends,
+                     inputShape,
+                     paddingShape,
+                     outputShape,
+                     inputValues,
+                     paddingDim,
+                     expectedOutputValues,
+                     paddingValue,
+                     quantizationScale,
+                     quantizationOffset);
+}
+
+TEST_SUITE("Pad_CpuRefTests")
+{
+
+TEST_CASE ("Pad2d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad2dTest(backends);
+}
+
+TEST_CASE ("Pad3d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad3dTest(backends);
+}
+
+TEST_CASE ("Pad4d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad4dTest(backends);
+}
+
+TEST_CASE ("Pad_Int8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PadInt8Test(backends);
+}
+
+TEST_CASE ("Pad_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PadUint8Test(backends);
+}
+
+TEST_CASE ("PadV22d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5f);
+}
+
+TEST_CASE ("PadV23d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0f);
+}
+
+TEST_CASE ("PadV24d_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33f);
+}
+
+TEST_CASE ("PadV2_Int8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+TEST_CASE ("PadV2_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+} // TEST_SUITE("Pad_CpuRefTests")
+
+TEST_SUITE("Pad_CpuAccTests")
+{
+
+TEST_CASE ("Pad2d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad2dTest(backends);
+}
+
+TEST_CASE ("Pad3d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad3dTest(backends);
+}
+
+TEST_CASE ("Pad4d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad4dTest(backends);
+}
+
+TEST_CASE ("Pad_Int8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PadInt8Test(backends);
+}
+
+TEST_CASE ("Pad_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PadUint8Test(backends);
+}
+
+TEST_CASE ("PadV22d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5f);
+}
+
+TEST_CASE ("PadV23d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0f);
+}
+
+TEST_CASE ("PadV24d_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33f);
+}
+
+TEST_CASE ("PadV2_Int8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+TEST_CASE ("PadV2_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+} // TEST_SUITE("Pad_CpuAccTests")
+
+TEST_SUITE("Pad_GpuAccTests")
+{
+
+TEST_CASE ("Pad2d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad2dTest(backends);
+}
+
+TEST_CASE ("Pad3d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad3dTest(backends);
+}
+
+TEST_CASE ("Pad4d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad4dTest(backends);
+}
+
+TEST_CASE ("Pad_Int8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PadInt8Test(backends);
+}
+
+TEST_CASE ("Pad_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PadUint8Test(backends);
+}
+
+TEST_CASE ("PadV22d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad2dTest(backends, tflite::BuiltinOperator_PADV2, -2.5f);
+}
+
+TEST_CASE ("PadV23d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad3dTest(backends, tflite::BuiltinOperator_PADV2, 2.0f);
+}
+
+TEST_CASE ("PadV24d_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    Pad4dTest(backends, tflite::BuiltinOperator_PADV2, -1.33f);
+}
+
+TEST_CASE ("PadV2_Int8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PadInt8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+TEST_CASE ("PadV2_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PadUint8Test(backends, tflite::BuiltinOperator_PADV2, -1, -1);
+}
+
+} // TEST_SUITE("Pad_GpuAccTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/PadTestHelper.hpp b/delegate/test/PadTestHelper.hpp
new file mode 100644
index 0000000..d049c52
--- /dev/null
+++ b/delegate/test/PadTestHelper.hpp
@@ -0,0 +1,224 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+template <typename T>
+std::vector<char> CreatePadTfLiteModel(
+    tflite::BuiltinOperator padOperatorCode,
+    tflite::TensorType tensorType,
+    tflite::MirrorPadMode paddingMode,
+    const std::vector<int32_t>& inputTensorShape,
+    const std::vector<int32_t>& paddingTensorShape,
+    const std::vector<int32_t>& outputTensorShape,
+    const std::vector<int32_t>& paddingDim,
+    const std::vector<T>& paddingValue,
+    float quantScale = 1.0f,
+    int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto inputTensor = CreateTensor(flatBufferBuilder,
+                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                            inputTensorShape.size()),
+                                    tensorType,
+                                    0,
+                                    flatBufferBuilder.CreateString("input"),
+                                    quantizationParameters);
+
+    auto paddingTensor = CreateTensor(flatBufferBuilder,
+                                      flatBufferBuilder.CreateVector<int32_t>(paddingTensorShape.data(),
+                                                                              paddingTensorShape.size()),
+                                      tflite::TensorType_INT32,
+                                      1,
+                                      flatBufferBuilder.CreateString("padding"));
+
+    auto outputTensor = CreateTensor(flatBufferBuilder,
+                                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                             outputTensorShape.size()),
+                                     tensorType,
+                                     2,
+                                     flatBufferBuilder.CreateString("output"),
+                                     quantizationParameters);
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, paddingTensor, outputTensor};
+
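+    // Buffer 0 (empty) backs the runtime-filled input; buffer 1 holds the padding
+    // sizes as raw bytes, making the padding tensor constant; buffer 2 backs the
+    // output.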
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingDim.data()),
+                                                    sizeof(int32_t) * paddingDim.size())));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    std::vector<int32_t> operatorInputs;
+    std::vector<int> subgraphInputs;
+
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_PadOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions;
+
+    if (padOperatorCode == tflite::BuiltinOperator_PAD)
+    {
+        operatorInputs = {{ 0, 1 }};
+        subgraphInputs = {{ 0, 1 }};
+        operatorBuiltinOptions = CreatePadOptions(flatBufferBuilder).Union();
+    }
+    else if(padOperatorCode == tflite::BuiltinOperator_MIRROR_PAD)
+    {
+        operatorInputs = {{ 0, 1 }};
+        subgraphInputs = {{ 0, 1 }};
+
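+        // MIRROR_PAD takes the same two inputs as PAD; the padding mode
+        // (REFLECT or SYMMETRIC) travels in MirrorPadOptions instead.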
+        operatorBuiltinOptionsType = BuiltinOptions_MirrorPadOptions;
+        operatorBuiltinOptions = CreateMirrorPadOptions(flatBufferBuilder, paddingMode).Union();
+    }
+    else if (padOperatorCode == tflite::BuiltinOperator_PADV2)
+    {
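+        // PADV2 additionally takes the pad value as a third, single-element
+        // { 1 }-shaped input tensor, so a dedicated buffer holding that one
+        // value is appended here.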
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingValue.data()),
+                                                        sizeof(T))));
+
+        const std::vector<int32_t> shape = { 1 };
+        auto padValueTensor = CreateTensor(flatBufferBuilder,
+                                           flatBufferBuilder.CreateVector<int32_t>(shape.data(),
+                                                                                   shape.size()),
+                                           tensorType,
+                                           3,
+                                           flatBufferBuilder.CreateString("paddingValue"),
+                                           quantizationParameters);
+
+        tensors.push_back(padValueTensor);
+
+        operatorInputs = {{ 0, 1, 3 }};
+        subgraphInputs = {{ 0, 1, 3 }};
+
+        operatorBuiltinOptionsType = BuiltinOptions_PadV2Options;
+        operatorBuiltinOptions = CreatePadV2Options(flatBufferBuilder).Union();
+    }
+
+    // Create operator
+    const std::vector<int32_t> operatorOutputs{ 2 };
+    flatbuffers::Offset <Operator> paddingOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphOutputs{ 2 };
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&paddingOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Pad Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         padOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void PadTest(tflite::BuiltinOperator padOperatorCode,
+             tflite::TensorType tensorType,
+             const std::vector<armnn::BackendId>& backends,
+             const std::vector<int32_t>& inputShape,
+             const std::vector<int32_t>& paddingShape,
+             std::vector<int32_t>& outputShape,
+             std::vector<T>& inputValues,
+             std::vector<int32_t>& paddingDim,
+             std::vector<T>& expectedOutputValues,
+             T paddingValue,
+             float quantScale = 1.0f,
+             int quantOffset  = 0,
+             tflite::MirrorPadMode paddingMode = tflite::MirrorPadMode_SYMMETRIC)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreatePadTfLiteModel<T>(padOperatorCode,
+                                                            tensorType,
+                                                            paddingMode,
+                                                            inputShape,
+                                                            paddingShape,
+                                                            outputShape,
+                                                            paddingDim,
+                                                            {paddingValue},
+                                                            quantScale,
+                                                            quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+          (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+          (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter,
+                                        outputShape, expectedOutputValues);
+}
+
+} // anonymous namespace
diff --git a/delegate/test/Pooling2dTest.cpp b/delegate/test/Pooling2dTest.cpp
new file mode 100644
index 0000000..ea87a29
--- /dev/null
+++ b/delegate/test/Pooling2dTest.cpp
@@ -0,0 +1,1275 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Pooling2dTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void MaxPool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 1, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
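+    // 2x2 max pooling with stride 2 and VALID padding reduces the 3x4 input
+    // to 1x2: max(-5, 8, 8, 12) = 12 and max(-10, 7, -15, 2) = 7.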
+    std::vector<float> expectedOutputValues = { 12.0f, 7.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_VALID,
+                         2,
+                         2,
+                         2,
+                         2);
+}
+
+void MaxPool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 1, 2, 1 };
+
+    std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+                                        8, 12, -15, 2,
+                                        3, -4, -1, -11 };
+
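+    // Max pooling operates directly on the quantized values, so the expected
+    // outputs are the same window maxima as in the float test.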
+    std::vector<int8_t> expectedOutputValues = { 12, 7 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_VALID,
+                          2,
+                          2,
+                          2,
+                          2,
+                          tflite::ActivationFunctionType_NONE,
+                          2.5f,
+                          1);
+}
+
+void MaxPool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
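+    // SAME padding pads the odd bottom row, giving a 2x2 output; the second
+    // row holds max(3, -4) = 3 and max(-1, -11) = -1.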
+    std::vector<float> expectedOutputValues = { 12.0f, 7.0f, 3.0f, -1.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_SAME,
+                         2,
+                         2,
+                         2,
+                         2);
+}
+
+void MaxPool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+                                        8, 12, -15, 2,
+                                        3, -4, -1, -11 };
+
+    std::vector<int8_t> expectedOutputValues = { 12, 7, 3, -1 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_SAME,
+                          2,
+                          2,
+                          2,
+                          2,
+                          tflite::ActivationFunctionType_NONE,
+                          2.5f,
+                          1);
+}
+
+void MaxPool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<float> inputValues = { -5.0f, -8.0f, -10.0f, 7.0f,
+                                       -8.0f, -12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
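+    // With a 2x2 window and stride 1 the output is 2x3; the fused RELU
+    // clamps the negative window maxima (-5, -8, -1) to 0.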
+    std::vector<float> expectedOutputValues = { 0.0f, 0.0f, 7.0f, 3.0f, 0.0f, 2.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_VALID,
+                         1,
+                         1,
+                         2,
+                         2,
+                         ::tflite::ActivationFunctionType_RELU);
+}
+
+void MaxPool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<int8_t> inputValues = { -5, -8, -10, 7,
+                                        -8, -12, -15, 2,
+                                        3, -4, -1, -11 };
+
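+    // With zero point 1, real 0 quantizes to 1, so the fused RELU clamps the
+    // negative window maxima to 1 instead of 0.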
+    std::vector<int8_t> expectedOutputValues = { 1, 1, 7, 3, 1, 2 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_VALID,
+                          1,
+                          1,
+                          2,
+                          2,
+                          ::tflite::ActivationFunctionType_RELU,
+                          2.0f,
+                          1);
+}
+
+void MaxPool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, -8.0f, -10.0f, 7.0f,
+                                       -8.0f, -12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { 0.0f, 0.0f, 3.0f, 0.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_SAME,
+                         2,
+                         2,
+                         1,
+                         1,
+                         ::tflite::ActivationFunctionType_RELU6);
+}
+
+void MaxPool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<int8_t> inputValues = { -5, -8, -10, 7,
+                                        -8, -12, -15, 2,
+                                        3, -4, -1, -11 };
+
+    std::vector<int8_t> expectedOutputValues = { 1, 1, 3, 1 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_SAME,
+                          2,
+                          2,
+                          1,
+                          1,
+                          ::tflite::ActivationFunctionType_RELU6,
+                          2.0f,
+                          1);
+}
+
+void MaxPool2dUint8PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<uint8_t> inputValues = { 5, 8, 10, 7,
+                                         8, 12, 15, 2,
+                                         3, 4, 1, 11 };
+
+    std::vector<uint8_t> expectedOutputValues = { 12, 15, 4, 11 };
+
+    Pooling2dTest<uint8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                           ::tflite::TensorType_UINT8,
+                           backends,
+                           inputShape,
+                           outputShape,
+                           inputValues,
+                           expectedOutputValues,
+                           ::tflite::Padding_SAME,
+                           2,
+                           2,
+                           2,
+                           2,
+                           tflite::ActivationFunctionType_NONE,
+                           2.5f,
+                           1);
+}
+
+void MaxPool2dUint8ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<uint8_t> inputValues = { 12, 8, 10, 15,
+                                         8, 5, 7, 2,
+                                         3, 4, 1, 11 };
+
+    std::vector<uint8_t> expectedOutputValues = { 12, 10, 15, 8, 7, 11 };
+
+    Pooling2dTest<uint8_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                           ::tflite::TensorType_UINT8,
+                           backends,
+                           inputShape,
+                           outputShape,
+                           inputValues,
+                           expectedOutputValues,
+                           ::tflite::Padding_VALID,
+                           1,
+                           1,
+                           2,
+                           2,
+                           ::tflite::ActivationFunctionType_RELU,
+                           2.0f,
+                           1);
+}
+
+void MaxPool2dInt16PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<int16_t> inputValues = { -5, 8, -10, 7,
+                                         8, 12, -15, 2,
+                                         3, -4, -1, -11 };
+
+    std::vector<int16_t> expectedOutputValues = { 12, 7, 3, -1 };
+
+    Pooling2dTest<int16_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                           ::tflite::TensorType_INT16,
+                           backends,
+                           inputShape,
+                           outputShape,
+                           inputValues,
+                           expectedOutputValues,
+                           ::tflite::Padding_SAME,
+                           2,
+                           2,
+                           2,
+                           2,
+                           tflite::ActivationFunctionType_NONE,
+                           2.5f,
+                           0);
+}
+
+void MaxPool2dInt16ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<int16_t> inputValues = { -5, -8, -10, 7,
+                                         -8, -12, -15, 2,
+                                         3, -4, -1, -11 };
+
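+    // INT16 uses symmetric quantization (zero point 0), so the fused RELU
+    // clamps negative values to 0, matching the float results.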
+    std::vector<int16_t> expectedOutputValues = { 0, 0, 7, 3, 0, 2 };
+
+    Pooling2dTest<int16_t>(tflite::BuiltinOperator_MAX_POOL_2D,
+                           ::tflite::TensorType_INT16,
+                           backends,
+                           inputShape,
+                           outputShape,
+                           inputValues,
+                           expectedOutputValues,
+                           ::tflite::Padding_VALID,
+                           1,
+                           1,
+                           2,
+                           2,
+                           ::tflite::ActivationFunctionType_RELU,
+                           2.0f,
+                           0);
+}
+
+void AveragePool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 1, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
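+    // 2x2 average pooling with stride 2 and VALID padding:
+    // (-5 + 8 + 8 + 12) / 4 = 5.75 and (-10 + 7 - 15 + 2) / 4 = -4.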
+    std::vector<float> expectedOutputValues = { 5.75f, -4.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_VALID,
+                         2,
+                         2,
+                         2,
+                         2);
+}
+
+void AveragePool2dInt8PaddingValidTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 1, 2, 1 };
+
+    std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+                                        8, 12, -15, 2,
+                                        3, -4, -1, -11 };
+
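+    // Quantized average pooling averages the raw int8 values and rounds to
+    // nearest: 5.75 -> 6 and -4 -> -4.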
+    std::vector<int8_t> expectedOutputValues = { 6, -4 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_VALID,
+                          2,
+                          2,
+                          2,
+                          2,
+                          tflite::ActivationFunctionType_NONE,
+                          2.5f,
+                          1);
+}
+
+void AveragePool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { 5.75f, -4.0f, -0.5f, -6.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_SAME,
+                         2,
+                         2,
+                         2,
+                         2);
+}
+
+void AveragePool2dInt8PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+                                        8, 12, -15, 2,
+                                        3, -4, -1, -11 };
+
+    std::vector<int8_t> expectedOutputValues = { 6, -4, -1, -6 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_SAME,
+                          2,
+                          2,
+                          2,
+                          2,
+                          tflite::ActivationFunctionType_NONE,
+                          2.5f,
+                          1);
+}
+
+void AveragePool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       -8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, 11.0f };
+
+    std::vector<float> expectedOutputValues = { 1.75f, 0.0f, 0.0f, 0.75f, 0.0f, 0.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_VALID,
+                         1,
+                         1,
+                         2,
+                         2,
+                         ::tflite::ActivationFunctionType_RELU);
+}
+
+void AveragePool2dInt8ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+                                        -8, 12, -15, 2,
+                                        3, -4, -1, 11 };
+
+    std::vector<int8_t> expectedOutputValues = { 2, 1, 1, 1, 1, 1 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_VALID,
+                          1,
+                          1,
+                          2,
+                          2,
+                          ::tflite::ActivationFunctionType_RELU,
+                          2.5f,
+                          1);
+}
+
+void AveragePool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       -8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, 11.0f };
+
+    std::vector<float> expectedOutputValues = { 0.0f, 0.0f, 3.0f, 0.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_SAME,
+                         2,
+                         2,
+                         1,
+                         1,
+                         ::tflite::ActivationFunctionType_RELU6);
+}
+
+void AveragePool2dInt8Relu6Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+                                        -8, 12, -15, 2,
+                                        3, -4, -1, 11 };
+
+    std::vector<int8_t> expectedOutputValues = { 1, 1, 3, 1 };
+
+    Pooling2dTest<int8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                          ::tflite::TensorType_INT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          ::tflite::Padding_SAME,
+                          2,
+                          2,
+                          1,
+                          1,
+                          ::tflite::ActivationFunctionType_RELU6,
+                          2.5f,
+                          1);
+}
+
+void AveragePool2dUint8PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<uint8_t> inputValues = { 5, 8, 10, 7,
+                                         8, 12, 15, 2,
+                                         3, 4, 1, 11 };
+
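+    // Averages are computed over the valid (unpadded) elements and rounded
+    // to nearest: 33/4 -> 8, 34/4 -> 9, 7/2 -> 4, 12/2 -> 6.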
+    std::vector<uint8_t> expectedOutputValues = { 8, 9, 4, 6 };
+
+    Pooling2dTest<uint8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                           ::tflite::TensorType_UINT8,
+                           backends,
+                           inputShape,
+                           outputShape,
+                           inputValues,
+                           expectedOutputValues,
+                           ::tflite::Padding_SAME,
+                           2,
+                           2,
+                           2,
+                           2,
+                           tflite::ActivationFunctionType_NONE,
+                           2.5f,
+                           1);
+}
+
+void AveragePool2dUint8ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<uint8_t> inputValues = { 12, 8, 10, 15,
+                                         8, 5, 7, 2,
+                                         3, 4, 1, 11 };
+
+    std::vector<uint8_t> expectedOutputValues = { 8, 8, 9, 5, 4, 5 };
+
+    Pooling2dTest<uint8_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                           ::tflite::TensorType_UINT8,
+                           backends,
+                           inputShape,
+                           outputShape,
+                           inputValues,
+                           expectedOutputValues,
+                           ::tflite::Padding_VALID,
+                           1,
+                           1,
+                           2,
+                           2,
+                           ::tflite::ActivationFunctionType_RELU,
+                           2.0f,
+                           1);
+}
+
+void AveragePool2dInt16PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<int16_t> inputValues = { -5, 8, -10, 7,
+                                         8, 12, -15, 2,
+                                         3, -4, -1, -11 };
+
+    std::vector<int16_t> expectedOutputValues = { 6, -4, -1, -6 };
+
+    Pooling2dTest<int16_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                           ::tflite::TensorType_INT16,
+                           backends,
+                           inputShape,
+                           outputShape,
+                           inputValues,
+                           expectedOutputValues,
+                           ::tflite::Padding_SAME,
+                           2,
+                           2,
+                           2,
+                           2,
+                           tflite::ActivationFunctionType_NONE,
+                           2.5f,
+                           0);
+}
+
+void AveragePool2dInt16ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<int16_t> inputValues = { -5, 8, -10, 7,
+                                         -8, 12, -15, 2,
+                                         3, -4, -1, 11 };
+
+    std::vector<int16_t> expectedOutputValues = { 2, 0, 0, 1, 0, 0 };
+
+    Pooling2dTest<int16_t>(tflite::BuiltinOperator_AVERAGE_POOL_2D,
+                           ::tflite::TensorType_INT16,
+                           backends,
+                           inputShape,
+                           outputShape,
+                           inputValues,
+                           expectedOutputValues,
+                           ::tflite::Padding_VALID,
+                           1,
+                           1,
+                           2,
+                           2,
+                           ::tflite::ActivationFunctionType_RELU,
+                           2.5f,
+                           0);
+}
+
+void L2Pool2dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 1, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
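+    // L2 pooling takes the square root of the mean square of each window,
+    // e.g. sqrt((25 + 64 + 64 + 144) / 4) = 8.616844.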
+    std::vector<float> expectedOutputValues = { 8.616844f, 9.721111f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_VALID,
+                         2,
+                         2,
+                         2,
+                         2);
+}
+
+void L2Pool2dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { 8.616844f, 9.721111f, 3.535534f, 7.81025f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_SAME,
+                         2,
+                         2,
+                         2,
+                         2);
+}
+
+void L2Pool2dFP32ReluTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       -8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, 11.0f };
+
+    std::vector<float> expectedOutputValues = { 8.616844f, 11.543396f, 9.721111f, 7.632169f, 9.8234415f, 9.367497f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_VALID,
+                         1,
+                         1,
+                         2,
+                         2,
+                         ::tflite::ActivationFunctionType_RELU);
+}
+
+void L2Pool2dFP32Relu6Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 2, 1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       -8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, 11.0f };
+
+    std::vector<float> expectedOutputValues = { 5.0f, 6.0f, 3.0f, 1.0f };
+
+    Pooling2dTest<float>(tflite::BuiltinOperator_L2_POOL_2D,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         ::tflite::Padding_SAME,
+                         2,
+                         2,
+                         1,
+                         1,
+                         ::tflite::ActivationFunctionType_RELU6);
+}
+
+TEST_SUITE("Pooling2d_GpuAccTests")
+{
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingValid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingValid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu6_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool2dFP32Relu6Test(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu6_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool2dInt8Relu6Test(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool2dUint8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Uint8_Relu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool2dUint8ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_PaddingValid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_PaddingValid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_Relu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_Relu6_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool2dFP32Relu6Test(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_Relu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_Relu6_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool2dInt8Relu6Test(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool2dUint8PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Uint8_Relu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool2dUint8ReluTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_PaddingValid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    L2Pool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    L2Pool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_Relu_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    L2Pool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_Relu6_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    L2Pool2dFP32Relu6Test(backends);
+}
+
+} // TEST_SUITE("Pooling2d_GpuAccTests")
+
+TEST_SUITE("Pooling2d_CpuAccTests")
+{
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingValid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingValid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu6_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool2dFP32Relu6Test(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu6_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool2dInt8Relu6Test(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool2dUint8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Uint8_Relu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool2dUint8ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_PaddingValid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_PaddingValid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_Relu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_Relu6_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool2dFP32Relu6Test(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_Relu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_Relu6_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool2dInt8Relu6Test(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool2dUint8PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Uint8_Relu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool2dUint8ReluTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_PaddingValid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    L2Pool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    L2Pool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_Relu_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    L2Pool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_Relu6_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    L2Pool2dFP32Relu6Test(backends);
+}
+
+} // TEST_SUITE("Pooling2d_CpuAccTests")
+
+TEST_SUITE("Pooling2d_CpuRefTests")
+{
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingValid_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingValid_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_PaddingSame_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_PaddingSame_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_FP32_Relu6_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool2dFP32Relu6Test(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int8_Relu6_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool2dInt8Relu6Test(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Uint8_PaddingSame_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool2dUint8PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Uint8_Relu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool2dUint8ReluTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int16_PaddingSame_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool2dInt16PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling2d_Int16_Relu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool2dInt16ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_PaddingValid_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_PaddingValid_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool2dInt8PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_PaddingSame_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_PaddingSame_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool2dInt8PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_Relu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_FP32_Relu6_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool2dFP32Relu6Test(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_Relu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool2dInt8ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int8_Relu6_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool2dInt8Relu6Test(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Uint8_PaddingSame_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool2dUint8PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Uint8_Relu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool2dUint8ReluTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int16_PaddingSame_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool2dInt16PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling2d_Int16_Relu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool2dInt16ReluTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_PaddingValid_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    L2Pool2dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_PaddingSame_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    L2Pool2dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_Relu_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    L2Pool2dFP32ReluTest(backends);
+}
+
+TEST_CASE ("L2Pooling2d_FP32_Relu6_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    L2Pool2dFP32Relu6Test(backends);
+}
+
+} // TEST_SUITE("Pooling2d_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/Pooling2dTestHelper.hpp b/delegate/test/Pooling2dTestHelper.hpp
new file mode 100644
index 0000000..6de85b6
--- /dev/null
+++ b/delegate/test/Pooling2dTestHelper.hpp
@@ -0,0 +1,196 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreatePooling2dTfLiteModel(
+    tflite::BuiltinOperator poolingOperatorCode,
+    tflite::TensorType tensorType,
+    const std::vector <int32_t>& inputTensorShape,
+    const std::vector <int32_t>& outputTensorShape,
+    tflite::Padding padding = tflite::Padding_SAME,
+    int32_t strideWidth = 0,
+    int32_t strideHeight = 0,
+    int32_t filterWidth = 0,
+    int32_t filterHeight = 0,
+    tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
+    float quantScale = 1.0f,
+    int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    flatbuffers::Offset<tflite::Buffer> buffers[3] = { CreateBuffer(flatBufferBuilder),
+                                                       CreateBuffer(flatBufferBuilder),
+                                                       CreateBuffer(flatBufferBuilder) };
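+    // Buffer 0 is the empty sentinel required by the TfLite schema; buffers
+    // 1 and 2 back the input and output tensors respectively.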
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    flatbuffers::Offset<Tensor> tensors[2] {
+         CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters),
+
+         CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters)
+    };
+
+    // create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_Pool2DOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreatePool2DOptions(flatBufferBuilder,
+                                                                           padding,
+                                                                           strideWidth,
+                                                                           strideHeight,
+                                                                           filterWidth,
+                                                                           filterHeight,
+                                                                           fusedActivation).Union();
+
+    const std::vector<int32_t> operatorInputs{0};
+    const std::vector<int32_t> operatorOutputs{1};
+    flatbuffers::Offset <Operator> poolingOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const int32_t subgraphInputs[1] = {0};
+    const int32_t subgraphOutputs[1] = {1};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors, 2),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs, 1),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs, 1),
+                       flatBufferBuilder.CreateVector(&poolingOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Pooling2d Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, poolingOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers, 3));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void Pooling2dTest(tflite::BuiltinOperator poolingOperatorCode,
+                   tflite::TensorType tensorType,
+                   std::vector<armnn::BackendId>& backends,
+                   std::vector<int32_t>& inputShape,
+                   std::vector<int32_t>& outputShape,
+                   std::vector<T>& inputValues,
+                   std::vector<T>& expectedOutputValues,
+                   tflite::Padding padding = tflite::Padding_SAME,
+                   int32_t strideWidth = 0,
+                   int32_t strideHeight = 0,
+                   int32_t filterWidth = 0,
+                   int32_t filterHeight = 0,
+                   tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
+                   float quantScale = 1.0f,
+                   int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreatePooling2dTfLiteModel(poolingOperatorCode,
+                                                               tensorType,
+                                                               inputShape,
+                                                               outputShape,
+                                                               padding,
+                                                               strideWidth,
+                                                               strideHeight,
+                                                               filterWidth,
+                                                               filterHeight,
+                                                               fusedActivation,
+                                                               quantScale,
+                                                               quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+}
+
+} // anonymous namespace
+
+
+
+
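
For reference, a minimal sketch of how the Pooling2dTest harness above is driven from a test case. The shapes, values, and function name below are illustrative assumptions, not taken from this patch:

    // Hypothetical caller of the Pooling2dTest helper; values are assumptions.
    void MaxPool2dFP32PaddingValidSketch(std::vector<armnn::BackendId>& backends)
    {
        std::vector<int32_t> inputShape  { 1, 3, 4, 1 };   // NHWC
        std::vector<int32_t> outputShape { 1, 1, 2, 1 };

        std::vector<float> inputValues = { 1, 2,  3,  4,
                                           5, 6,  7,  8,
                                           9, 10, 11, 12 };
        // 3x3 filter, stride 1, VALID padding -> two windows with maxima 11 and 12.
        std::vector<float> expectedOutputValues = { 11, 12 };

        Pooling2dTest<float>(tflite::BuiltinOperator_MAX_POOL_2D,
                             ::tflite::TensorType_FLOAT32,
                             backends,
                             inputShape,
                             outputShape,
                             inputValues,
                             expectedOutputValues,
                             tflite::Padding_VALID,
                             1, 1,    // strideWidth, strideHeight
                             3, 3);   // filterWidth, filterHeight
    }
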
diff --git a/delegate/test/Pooling3dTest.cpp b/delegate/test/Pooling3dTest.cpp
new file mode 100644
index 0000000..85202e1
--- /dev/null
+++ b/delegate/test/Pooling3dTest.cpp
@@ -0,0 +1,431 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Pooling3dTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+// Pool3D custom op was only added in tflite r2.6.
+#if defined(ARMNN_POST_TFLITE_2_5)
+
+void MaxPool3dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input and expected output data
+    std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
+    std::vector<int32_t> outputShape = { 1, 1, 2, 3, 1 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6 };
+    std::vector<float> expectedOutputValues = { 6, 6, 4 };
+
+    // poolType string required to create the correct pooling operator
+    // Padding type required to create the padding in custom options
+    std::string poolType = "kMax";
+    TfLitePadding padding = kTfLitePaddingValid;
+
+    Pooling3dTest<float>(poolType,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         padding,
+                         1,
+                         1,
+                         1,
+                         2,
+                         2,
+                         2);
+}
+
+void MaxPool3dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data and expected output data
+    std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
+    std::vector<int32_t> outputShape = { 1, 2, 3, 4, 1 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6 };
+    std::vector<float> expectedOutputValues = { 6, 6, 4, 4, 6, 6, 6, 6, 4, 5, 6, 6, 6, 6, 4, 4 };
+
+    // poolType string required to create the correct pooling operator
+    // Padding type required to create the padding in custom options
+    std::string poolType = "kMax";
+    TfLitePadding padding = kTfLitePaddingSame;
+
+    Pooling3dTest<float>(poolType,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         padding,
+                         1,
+                         1,
+                         1,
+                         2,
+                         2,
+                         2);
+}
+
+void MaxPool3dFP32H1Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data and expected output data
+    std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
+    std::vector<int32_t> outputShape = { 1, 1, 3, 3, 1 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6 };
+    std::vector<float> expectedOutputValues = { 2, 3 };
+
+    // poolType string required to create the correct pooling operator
+    // Padding type required to create the padding in custom options
+    std::string poolType = "kMax";
+    TfLitePadding padding = kTfLitePaddingValid;
+
+    Pooling3dTest<float>(poolType,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         padding,
+                         1,
+                         1,
+                         1,
+                         2,
+                         1,
+                         2);
+}
+
+void MaxPool3dFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data and expected output data
+    std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
+    std::vector<int32_t> outputShape = { 1, 2, 3, 4, 1 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6 };
+    std::vector<float> expectedOutputValues = { 6, 6 };
+
+    // poolType string required to create the correct pooling operator
+    // Padding type required to create the padding in custom options
+    std::string poolType = "kMax";
+    TfLitePadding padding = kTfLitePaddingUnknown;
+
+    Pooling3dTest<float>(poolType,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         padding,
+                         1,
+                         1,
+                         1,
+                         2,
+                         2,
+                         2);
+}
+
+void AveragePool3dFP32PaddingValidTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data and expected output data.
+    std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
+    std::vector<int32_t> outputShape = { 1, 1, 2, 3, 1 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6 };
+    std::vector<float> expectedOutputValues = { 3.5, 3, 2.5 };
+
+    // poolType string required to create the correct pooling operator
+    // Padding type required to create the padding in custom options
+    std::string poolType = "kAverage";
+    TfLitePadding padding = kTfLitePaddingValid;
+
+    Pooling3dTest<float>(poolType,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         padding,
+                         1,
+                         1,
+                         1,
+                         2,
+                         2,
+                         2);
+}
+
+void AveragePool3dFP32PaddingSameTest(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data and expected output data
+    std::vector<int32_t> inputShape = { 4, 2, 3, 1, 1 };
+    std::vector<int32_t> outputShape = { 4, 2, 3, 1, 1 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6 };
+    std::vector<float> expectedOutputValues = { 3, 4, 4.5, 4.5, 5.5, 6, 3, 4, 4.5, 4.5, 5.5, 6, 3, 4, 4.5, 4.5 };
+
+    // poolType string required to create the correct pooling operator
+    // Padding type required to create the padding in custom options
+    std::string poolType = "kAverage";
+    TfLitePadding padding = kTfLitePaddingSame;
+
+    Pooling3dTest<float>(poolType,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         padding,
+                         1,
+                         1,
+                         1,
+                         2,
+                         2,
+                         2);
+}
+
+void AveragePool3dFP32H1Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data and expected output data
+    std::vector<int32_t> inputShape = { 1, 2, 3, 4, 1 };
+    std::vector<int32_t> outputShape = { 1, 1, 2, 2, 1 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6 };
+    std::vector<float> expectedOutputValues = { 1.5, 3.5 };
+
+    // poolType string required to create the correct pooling operator
+    // Padding type required to create the padding in custom options
+    std::string poolType = "kAverage";
+    TfLitePadding padding = kTfLitePaddingUnknown;
+
+    Pooling3dTest<float>(poolType,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         padding,
+                         2,
+                         2,
+                         2,
+                         2,
+                         1,
+                         2);
+}
+
+void AveragePool3dFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data and expected output data
+    std::vector<int32_t> inputShape = { 4, 3, 2, 1, 1 };
+    std::vector<int32_t> outputShape = { 1, 2, 2, 4, 1 };
+
+    std::vector<float> inputValues = { 1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6,
+                                       1, 2, 3, 4, 5, 6 };
+    std::vector<float> expectedOutputValues = { 3.125, 4.25 };
+
+    // poolType string required to create the correct pooling operator
+    // Padding type required to create the padding in custom options
+    std::string poolType = "kAverage";
+    TfLitePadding padding = kTfLitePaddingUnknown;
+
+    Pooling3dTest<float>(poolType,
+                         ::tflite::TensorType_FLOAT32,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         padding,
+                         2,
+                         2,
+                         2,
+                         2,
+                         2,
+                         2);
+}
+
+TEST_SUITE("Pooling3d_GpuAccTests")
+{
+
+TEST_CASE ("MaxPooling3d_FP32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool3dFP32Test(backends);
+}
+
+TEST_CASE ("MaxPooling3d_FP32_PaddingValid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool3dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling3d_FP32_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool3dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling3d_FP32_H1_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    MaxPool3dFP32H1Test(backends);
+}
+
+TEST_CASE ("AveragePooling3d_FP32_PaddingValid_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool3dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling3d_FP32_PaddingSame_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool3dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling3d_FP32_H1_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    AveragePool3dFP32H1Test(backends);
+}
+
+} // TEST_SUITE("Pooling3d_GpuAccTests")
+
+TEST_SUITE("Pooling3d_CpuAccTests")
+{
+
+TEST_CASE ("MaxPooling3d_FP32_PaddingValid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool3dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling3d_FP32_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool3dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling3d_FP32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool3dFP32Test(backends);
+}
+
+TEST_CASE ("MaxPooling3d_FP32_H1_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    MaxPool3dFP32H1Test(backends);
+}
+
+TEST_CASE ("AveragePooling3d_FP32_PaddingValid_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool3dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling3d_FP32_PaddingSame_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool3dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling3d_FP32_H1_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    AveragePool3dFP32H1Test(backends);
+}
+
+} // TEST_SUITE("Pooling3d_CpuAccTests")
+
+TEST_SUITE("Pooling3d_CpuRefTests")
+{
+
+TEST_CASE ("MaxPooling3d_FP32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool3dFP32Test(backends);
+}
+
+TEST_CASE ("MaxPooling3d_FP32_PaddingValid_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool3dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("MaxPooling3d_FP32_PaddingSame_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool3dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("MaxPooling3d_FP32_H1_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    MaxPool3dFP32H1Test(backends);
+}
+
+TEST_CASE ("AveragePooling3d_FP32_PaddingValid_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool3dFP32PaddingValidTest(backends);
+}
+
+TEST_CASE ("AveragePooling3d_FP32_PaddingSame_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool3dFP32PaddingSameTest(backends);
+}
+
+TEST_CASE ("AveragePooling3d_FP32_H1_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    AveragePool3dFP32H1Test(backends);
+}
+
+} // TEST_SUITE("Pooling3d_CpuRefTests")
+
+#endif // defined(ARMNN_POST_TFLITE_2_5)
+
+} // namespace armnnDelegate
\ No newline at end of file
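
The expected values in the VALID-padding cases above can be sanity-checked against a naive NDHWC reference. The sketch below (a verification aid only, with batch and channels fixed at 1; names are assumptions) reproduces { 6, 6, 4, ... } for MaxPool3dFP32PaddingValidTest when called as RefMaxPool3dValid(input, 2, 3, 4, 2, 2, 2, 1, 1, 1):

    #include <algorithm>
    #include <limits>
    #include <vector>

    // Naive 3D max pooling with VALID padding, NDHWC layout, N = C = 1.
    std::vector<float> RefMaxPool3dValid(const std::vector<float>& in,
                                         int inD, int inH, int inW,
                                         int kD, int kH, int kW,
                                         int sD, int sH, int sW)
    {
        const int outD = (inD - kD) / sD + 1;
        const int outH = (inH - kH) / sH + 1;
        const int outW = (inW - kW) / sW + 1;
        std::vector<float> out;
        for (int d = 0; d < outD; ++d)
        {
            for (int h = 0; h < outH; ++h)
            {
                for (int w = 0; w < outW; ++w)
                {
                    // Take the maximum over the kD x kH x kW window.
                    float m = std::numeric_limits<float>::lowest();
                    for (int kd = 0; kd < kD; ++kd)
                    {
                        for (int kh = 0; kh < kH; ++kh)
                        {
                            for (int kw = 0; kw < kW; ++kw)
                            {
                                const int idx = ((d * sD + kd) * inH + (h * sH + kh)) * inW + (w * sW + kw);
                                m = std::max(m, in[idx]);
                            }
                        }
                    }
                    out.push_back(m);
                }
            }
        }
        return out;
    }
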
diff --git a/delegate/test/Pooling3dTestHelper.hpp b/delegate/test/Pooling3dTestHelper.hpp
new file mode 100644
index 0000000..dd90e4b
--- /dev/null
+++ b/delegate/test/Pooling3dTestHelper.hpp
@@ -0,0 +1,298 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <flatbuffers/flexbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+#if defined(ARMNN_POST_TFLITE_2_5)
+
+std::vector<uint8_t> CreateCustomOptions(int, int, int, int, int, int, TfLitePadding);
+
+std::vector<char> CreatePooling3dTfLiteModel(
+    std::string poolType,
+    tflite::TensorType tensorType,
+    const std::vector<int32_t>& inputTensorShape,
+    const std::vector<int32_t>& outputTensorShape,
+    TfLitePadding padding = kTfLitePaddingSame,
+    int32_t strideWidth = 0,
+    int32_t strideHeight = 0,
+    int32_t strideDepth = 0,
+    int32_t filterWidth = 0,
+    int32_t filterHeight = 0,
+    int32_t filterDepth = 0,
+    tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
+    float quantScale = 1.0f,
+    int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    // Create the input and output tensors
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // Create the custom options from the function below
+    std::vector<uint8_t> customOperatorOptions = CreateCustomOptions(strideHeight, strideWidth, strideDepth,
+                                                                     filterHeight, filterWidth, filterDepth, padding);
+    // Only one operator code is registered in this model, so the opcode index is always 0
+    uint8_t opCodeIndex = 0;
+    // Set the operator name based on the PoolType passed in from the test case
+    std::string opName = "";
+    if (poolType == "kMax")
+    {
+        opName = "MaxPool3D";
+    }
+    else
+    {
+        opName = "AveragePool3D";
+    }
+    // To create a custom operator code you pass in the builtin code for custom operators and the name of the custom op
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCodeDirect(flatBufferBuilder,
+                                                                              tflite::BuiltinOperator_CUSTOM,
+                                                                              opName.c_str());
+
+    // Create the Operator using the opCodeIndex and custom options. Also sets builtin options to none.
+    const std::vector<int32_t> operatorInputs{ 0 };
+    const std::vector<int32_t> operatorOutputs{ 1 };
+    flatbuffers::Offset<Operator> poolingOperator =
+        CreateOperator(flatBufferBuilder,
+                       opCodeIndex,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       tflite::BuiltinOptions_NONE,
+                       0,
+                       flatBufferBuilder.CreateVector<uint8_t>(customOperatorOptions),
+                       tflite::CustomOptionsFormat_FLEXBUFFERS);
+
+    // Create the subgraph using the operator created above.
+    const std::vector<int> subgraphInputs{ 0 };
+    const std::vector<int> subgraphOutputs{ 1 };
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&poolingOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Pooling3d Operator Model");
+
+    // Create the model using operatorCode and the subgraph.
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template<typename T>
+void Pooling3dTest(std::string poolType,
+                   tflite::TensorType tensorType,
+                   std::vector<armnn::BackendId>& backends,
+                   std::vector<int32_t>& inputShape,
+                   std::vector<int32_t>& outputShape,
+                   std::vector<T>& inputValues,
+                   std::vector<T>& expectedOutputValues,
+                   TfLitePadding padding = kTfLitePaddingSame,
+                   int32_t strideWidth = 0,
+                   int32_t strideHeight = 0,
+                   int32_t strideDepth = 0,
+                   int32_t filterWidth = 0,
+                   int32_t filterHeight = 0,
+                   int32_t filterDepth = 0,
+                   tflite::ActivationFunctionType fusedActivation = tflite::ActivationFunctionType_NONE,
+                   float quantScale = 1.0f,
+                   int quantOffset = 0)
+{
+    using namespace tflite;
+    // Create the single op model buffer
+    std::vector<char> modelBuffer = CreatePooling3dTfLiteModel(poolType,
+                                                               tensorType,
+                                                               inputShape,
+                                                               outputShape,
+                                                               padding,
+                                                               strideWidth,
+                                                               strideHeight,
+                                                               strideDepth,
+                                                               filterWidth,
+                                                               filterHeight,
+                                                               filterDepth,
+                                                               fusedActivation,
+                                                               quantScale,
+                                                               quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+
+    // Custom ops need to be added to the BuiltinOp resolver before the interpreter is created
+    // Based on the poolType from the test case add the custom operator using the name and the tflite
+    // registration function
+    tflite::ops::builtin::BuiltinOpResolver armnn_op_resolver;
+    if (poolType == "kMax")
+    {
+        armnn_op_resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
+    }
+    else
+    {
+        armnn_op_resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
+    }
+
+    CHECK(InterpreterBuilder(tfLiteModel, armnn_op_resolver)
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+
+    // Custom ops need to be added to the BuiltinOp resolver before the interpreter is created
+    // Based on the poolType from the test case add the custom operator using the name and the tflite
+    // registration function
+    tflite::ops::builtin::BuiltinOpResolver tflite_op_resolver;
+    if (poolType == "kMax")
+    {
+        tflite_op_resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
+    }
+    else
+    {
+        tflite_op_resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
+    }
+
+    CHECK(InterpreterBuilder(tfLiteModel, tflite_op_resolver)
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+}
+
+// Function to create the flexbuffer custom options for the custom pooling3d operator.
+std::vector<uint8_t> CreateCustomOptions(int strideHeight, int strideWidth, int strideDepth,
+                                         int filterHeight, int filterWidth, int filterDepth, TfLitePadding padding)
+{
+    auto flex_builder = std::make_unique<flexbuffers::Builder>();
+    size_t map_start = flex_builder->StartMap();
+    flex_builder->String("data_format", "NDHWC");
+    // Padding is created as a key and padding type. Only VALID and SAME supported
+    if (padding == kTfLitePaddingValid)
+    {
+        flex_builder->String("padding", "VALID");
+    }
+    else
+    {
+        flex_builder->String("padding", "SAME");
+    }
+
+    // Vector of filter dimensions in order ( 1, Depth, Height, Width, 1 )
+    auto start = flex_builder->StartVector("ksize");
+    flex_builder->Add(1);
+    flex_builder->Add(filterDepth);
+    flex_builder->Add(filterHeight);
+    flex_builder->Add(filterWidth);
+    flex_builder->Add(1);
+    // EndVector( start, bool typed, bool fixed)
+    flex_builder->EndVector(start, true, false);
+
+    // Vector of stride dimensions in order ( 1, Depth, Height, Width, 1 )
+    auto stridesStart = flex_builder->StartVector("strides");
+    flex_builder->Add(1);
+    flex_builder->Add(strideDepth);
+    flex_builder->Add(strideHeight);
+    flex_builder->Add(strideWidth);
+    flex_builder->Add(1);
+    // EndVector( stridesStart, bool typed, bool fixed)
+    flex_builder->EndVector(stridesStart, true, false);
+
+    flex_builder->EndMap(map_start);
+    flex_builder->Finish();
+
+    return flex_builder->GetBuffer();
+}
+#endif // defined(ARMNN_POST_TFLITE_2_5)
+} // anonymous namespace
+
+
+
+
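
The FlexBuffers payload produced by CreateCustomOptions above can be decoded with the same flexbuffers API to confirm its layout. A minimal sketch, assuming only flatbuffers/flexbuffers.h (the function name is illustrative):

    #include <flatbuffers/flexbuffers.h>

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Read back the custom options map built for the Pool3D custom op.
    void InspectPooling3dOptions(const std::vector<uint8_t>& options)
    {
        auto map = flexbuffers::GetRoot(options.data(), options.size()).AsMap();
        assert(map["data_format"].AsString().str() == "NDHWC");

        // Both vectors are laid out as { 1, Depth, Height, Width, 1 }.
        auto ksize   = map["ksize"].AsTypedVector();
        auto strides = map["strides"].AsTypedVector();
        assert(ksize.size() == 5 && strides.size() == 5);
    }
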
diff --git a/delegate/test/PreluTest.cpp b/delegate/test/PreluTest.cpp
new file mode 100644
index 0000000..40bf1dd
--- /dev/null
+++ b/delegate/test/PreluTest.cpp
@@ -0,0 +1,134 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "PreluTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void PreluFloatSimpleTest(std::vector<armnn::BackendId>& backends, bool isAlphaConst, bool isDynamicOutput = false)
+{
+    std::vector<int32_t> inputShape { 1, 2, 3 };
+    std::vector<int32_t> alphaShape { 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3 };
+
+    if (isDynamicOutput)
+    {
+        outputShape.clear();
+    }
+
+    std::vector<float> inputData = { -14.f, 2.f, 0.f, 1.f, -5.f, 14.f };
+    std::vector<float> alphaData = { 0.5f };
+    std::vector<float> expectedOutput = { -7.f, 2.f, 0.f, 1.f, -2.5f, 14.f };
+
+    PreluTest(tflite::BuiltinOperator_PRELU,
+              ::tflite::TensorType_FLOAT32,
+              backends,
+              inputShape,
+              alphaShape,
+              outputShape,
+              inputData,
+              alphaData,
+              expectedOutput,
+              isAlphaConst);
+}
+
+TEST_SUITE("Prelu_CpuRefTests")
+{
+
+TEST_CASE ("PreluFp32SimpleConstTest_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PreluFloatSimpleTest(backends, true);
+}
+
+TEST_CASE ("PreluFp32SimpleTest_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PreluFloatSimpleTest(backends, false);
+}
+
+TEST_CASE ("PreluFp32SimpleConstDynamicTest_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PreluFloatSimpleTest(backends, true, true);
+}
+
+TEST_CASE ("PreluFp32SimpleDynamicTest_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    PreluFloatSimpleTest(backends, false, true);
+}
+
+} // TEST_SUITE("Prelu_CpuRefTests")
+
+TEST_SUITE("Prelu_CpuAccTests")
+{
+
+TEST_CASE ("PreluFp32SimpleConstTest_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PreluFloatSimpleTest(backends, true);
+}
+
+TEST_CASE ("PreluFp32SimpleTest_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PreluFloatSimpleTest(backends, false);
+}
+
+TEST_CASE ("PreluFp32SimpleConstDynamicTest_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PreluFloatSimpleTest(backends, true, true);
+}
+
+TEST_CASE ("PreluFp32SimpleDynamicTest_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    PreluFloatSimpleTest(backends, false, true);
+}
+
+} // TEST_SUITE("Prelu_CpuAccTests")
+
+TEST_SUITE("Prelu_GpuAccTests")
+{
+
+TEST_CASE ("PreluFp32SimpleConstTest_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PreluFloatSimpleTest(backends, true);
+}
+
+TEST_CASE ("PreluFp32SimpleTest_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PreluFloatSimpleTest(backends, false);
+}
+
+TEST_CASE ("PreluFp32SimpleConstDynamicTest_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PreluFloatSimpleTest(backends, true, true);
+}
+
+TEST_CASE ("PreluFp32SimpleDynamicTest_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    PreluFloatSimpleTest(backends, false, true);
+}
+
+} // TEST_SUITE("Prelu_GpuAccTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
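
The expected outputs in PreluFloatSimpleTest follow directly from the PReLU definition f(x) = x for x >= 0 and f(x) = alpha * x otherwise, with the scalar alpha broadcast across the input. A reference sketch (illustrative only, not part of the patch):

    #include <vector>

    // Reference PReLU with a single broadcast alpha.
    std::vector<float> RefPrelu(const std::vector<float>& input, float alpha)
    {
        std::vector<float> output;
        output.reserve(input.size());
        for (float x : input)
        {
            output.push_back(x >= 0.0f ? x : alpha * x);
        }
        return output;
    }

    // RefPrelu({ -14, 2, 0, 1, -5, 14 }, 0.5f) yields { -7, 2, 0, 1, -2.5, 14 },
    // matching expectedOutput above.
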
diff --git a/delegate/test/PreluTestHelper.hpp b/delegate/test/PreluTestHelper.hpp
new file mode 100644
index 0000000..0721c13
--- /dev/null
+++ b/delegate/test/PreluTestHelper.hpp
@@ -0,0 +1,195 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreatePreluTfLiteModel(tflite::BuiltinOperator preluOperatorCode,
+                                         tflite::TensorType tensorType,
+                                         const std::vector<int32_t>& inputShape,
+                                         const std::vector<int32_t>& alphaShape,
+                                         const std::vector<int32_t>& outputShape,
+                                         std::vector<float>& alphaData,
+                                         bool alphaIsConstant)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector(
+        reinterpret_cast<const uint8_t *>(alphaData.data()), sizeof(float) * alphaData.size())));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ 1.0f }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ 0 }));
+
+    auto inputTensor = CreateTensor(flatBufferBuilder,
+                                    flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
+                                                                          inputShape.size()),
+                                    tensorType,
+                                    1,
+                                    flatBufferBuilder.CreateString("input"),
+                                    quantizationParameters);
+
+    auto alphaTensor = CreateTensor(flatBufferBuilder,
+                                    flatBufferBuilder.CreateVector<int32_t>(alphaShape.data(),
+                                                                          alphaShape.size()),
+                                    tensorType,
+                                    2,
+                                    flatBufferBuilder.CreateString("alpha"),
+                                    quantizationParameters);
+
+    auto outputTensor = CreateTensor(flatBufferBuilder,
+                                     flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
+                                                                           outputShape.size()),
+                                     tensorType,
+                                     3,
+                                     flatBufferBuilder.CreateString("output"),
+                                     quantizationParameters);
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, alphaTensor, outputTensor };
+
+    const std::vector<int> operatorInputs{0, 1};
+    const std::vector<int> operatorOutputs{2};
+    flatbuffers::Offset<Operator> preluOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
+
+    std::vector<int> subgraphInputs{0};
+    if (!alphaIsConstant)
+    {
+        subgraphInputs.push_back(1);
+    }
+
+    const std::vector<int> subgraphOutputs{2};
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&preluOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Prelu Operator Model");
+    flatbuffers::Offset<OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder, preluOperatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&opCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void PreluTest(tflite::BuiltinOperator preluOperatorCode,
+               tflite::TensorType tensorType,
+               const std::vector<armnn::BackendId>& backends,
+               const std::vector<int32_t>& inputShape,
+               const std::vector<int32_t>& alphaShape,
+               std::vector<int32_t>& outputShape,
+               std::vector<float>& inputData,
+               std::vector<float>& alphaData,
+               std::vector<float>& expectedOutput,
+               bool alphaIsConstant)
+{
+    using namespace tflite;
+
+    std::vector<char> modelBuffer = CreatePreluTfLiteModel(preluOperatorCode,
+                                                           tensorType,
+                                                           inputShape,
+                                                           alphaShape,
+                                                           outputShape,
+                                                           alphaData,
+                                                           alphaIsConstant);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    CHECK(tfLiteModel != nullptr);
+
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+          (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+          (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputData);
+    armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputData);
+
+    // Set alpha data if not constant
+    if (!alphaIsConstant)
+    {
+        armnnDelegate::FillInput<float>(tfLiteInterpreter, 1, alphaData);
+        armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 1, alphaData);
+    }
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+
+    for (size_t i = 0; i < expectedOutput.size(); i++)
+    {
+        CHECK(expectedOutput[i] == armnnDelegateOutputData[i]);
+        CHECK(tfLiteDelegateOutputData[i] == expectedOutput[i]);
+        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+    }
+}
+} // anonymous namespace
\ No newline at end of file
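
PreluTest compares the two interpreters' outputs with exact float equality, which is safe here because every expected value is exactly representable in binary floating point. For operators whose backends may legitimately differ in the last ULP, a tolerance-based check via doctest's Approx is the usual alternative; a sketch (not what this patch does):

    #include <doctest/doctest.h>

    #include <cstddef>
    #include <vector>

    // Tolerance-based variant of the element-wise output comparison.
    template <typename T>
    void CheckOutputClose(const T* actual, const std::vector<T>& expected, double eps = 1e-6)
    {
        for (size_t i = 0; i < expected.size(); ++i)
        {
            CHECK(actual[i] == doctest::Approx(expected[i]).epsilon(eps));
        }
    }
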
diff --git a/delegate/test/QuantizationTest.cpp b/delegate/test/QuantizationTest.cpp
new file mode 100644
index 0000000..0210602
--- /dev/null
+++ b/delegate/test/QuantizationTest.cpp
@@ -0,0 +1,455 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "QuantizationTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+// Dequantize operator test functions.
+void DequantizeUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 2, 4 };
+    std::vector<int32_t> outputShape { 2, 4 };
+
+    // Set input and output data
+    std::vector<uint8_t> inputValues
+    {
+        0, 1, 2, 3, // Lower bounds
+        252, 253, 254, 255 // Upper bounds
+    };
+    std::vector<float> expectedOutputValues
+    {
+        0.f, 1.f, 2.f, 3.f,
+        252.f, 253.f, 254.f, 255.f
+    };
+
+    QuantizationTest<uint8_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
+                                     ::tflite::TensorType_UINT8,
+                                     ::tflite::TensorType_FLOAT32,
+                                     backends,
+                                     inputShape,
+                                     outputShape,
+                                     inputValues,
+                                     expectedOutputValues);
+}
+
+void DequantizeInt8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 2, 4 };
+    std::vector<int32_t> outputShape { 2, 4 };
+
+    std::vector<int8_t> inputValues
+    {
+        -1, 0, 1, 2,
+        -128, -127, 126, 127
+    };
+    std::vector<float> expectedOutputValues
+    {
+        -1.f, 0.f, 1.f, 2.f,
+        -128.f, -127.f, 126.f, 127.f
+    };
+
+    QuantizationTest<int8_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
+                                     ::tflite::TensorType_INT8,
+                                     ::tflite::TensorType_FLOAT32,
+                                     backends,
+                                     inputShape,
+                                     outputShape,
+                                     inputValues,
+                                     expectedOutputValues);
+}
+
+void DequantizeInt16Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 2, 4 };
+    std::vector<int32_t> outputShape { 2, 4 };
+
+    std::vector<int16_t> inputValues
+    {
+        -1, 0, 1, 2,
+        -32768, -16384, 16384, 32767
+    };
+    std::vector<float> expectedOutputValues
+    {
+        -1.f, 0.f, 1.f, 2.f,
+        -32768.f, -16384.f, 16384.f, 32767.f
+    };
+
+    QuantizationTest<int16_t, float>(tflite::BuiltinOperator_DEQUANTIZE,
+                                     ::tflite::TensorType_INT16,
+                                     ::tflite::TensorType_FLOAT32,
+                                     backends,
+                                     inputShape,
+                                     outputShape,
+                                     inputValues,
+                                     expectedOutputValues);
+}
+
+// Quantize operator test functions.
+void QuantizeFloat32Uint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 2, 4 };
+    std::vector<int32_t> outputShape { 2, 4 };
+
+    // Set input and output data
+    std::vector<float> inputValues
+    {
+         -1.f, 0.f, 1.f, 2.f, // Lower bounds
+         252.f, 253.f, 255.f, 256.f // Upper bounds
+    };
+    std::vector<uint8_t> expectedOutputValues
+    {
+        0, 0, 1, 2,
+        252, 253, 255, 255
+    };
+
+    QuantizationTest<float, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
+                                     ::tflite::TensorType_FLOAT32,
+                                     ::tflite::TensorType_UINT8,
+                                     backends,
+                                     inputShape,
+                                     outputShape,
+                                     inputValues,
+                                     expectedOutputValues);
+}
+
+void QuantizeFloat32Int8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 2, 4 };
+    std::vector<int32_t> outputShape { 2, 4 };
+
+    std::vector<float> inputValues
+    {
+        -1.f, 0.f, 1.f, 2.f,
+        -128.5f, -127.f, 126.f, 127.5f
+    };
+    std::vector<int8_t> expectedOutputValues
+    {
+        -1, 0, 1, 2,
+        -128, -127, 126, 127
+    };
+
+    QuantizationTest<float, int8_t>(tflite::BuiltinOperator_QUANTIZE,
+                                     ::tflite::TensorType_FLOAT32,
+                                     ::tflite::TensorType_INT8,
+                                     backends,
+                                     inputShape,
+                                     outputShape,
+                                     inputValues,
+                                     expectedOutputValues);
+}
+
+void QuantizeFloat32Int16Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 2, 4 };
+    std::vector<int32_t> outputShape { 2, 4 };
+
+    std::vector<float> inputValues
+    {
+        -1.f, 0.f, 1.f, 2.f,
+        -32768.5f, -16384.f, 16384.f, 32767.5f
+    };
+    std::vector<int16_t> expectedOutputValues
+    {
+        -1, 0, 1, 2,
+        -32768, -16384, 16384, 32767
+    };
+
+    QuantizationTest<float, int16_t>(tflite::BuiltinOperator_QUANTIZE,
+                                    ::tflite::TensorType_FLOAT32,
+                                    ::tflite::TensorType_INT16,
+                                    backends,
+                                    inputShape,
+                                    outputShape,
+                                    inputValues,
+                                    expectedOutputValues);
+}
+
+void QuantizeInt16Int16Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 2, 4 };
+    std::vector<int32_t> outputShape { 2, 4 };
+
+    std::vector<int16_t> inputValues
+    {
+        -1, 0, 1, 2,
+        -32768, -16384, 16384, 32767
+    };
+    std::vector<int16_t> expectedOutputValues
+    {
+        -1, 0, 1, 2,
+        -32768, -16384, 16384, 32767
+    };
+
+    QuantizationTest<int16_t, int16_t>(tflite::BuiltinOperator_QUANTIZE,
+                                     ::tflite::TensorType_INT16,
+                                     ::tflite::TensorType_INT16,
+                                     backends,
+                                     inputShape,
+                                     outputShape,
+                                     inputValues,
+                                     expectedOutputValues);
+}
+
+void QuantizeInt16Int8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 2, 4 };
+    std::vector<int32_t> outputShape { 2, 4 };
+
+    std::vector<int16_t> inputValues
+    {
+        -1, 0, 1, 2,
+        -32768, -16384, 16384, 32767
+    };
+    std::vector<int8_t> expectedOutputValues
+    {
+        -1, 0, 1, 2,
+        -128, -128, 127, 127
+    };
+
+    QuantizationTest<int16_t, int8_t>(tflite::BuiltinOperator_QUANTIZE,
+                                       ::tflite::TensorType_INT16,
+                                       ::tflite::TensorType_INT8,
+                                       backends,
+                                       inputShape,
+                                       outputShape,
+                                       inputValues,
+                                       expectedOutputValues);
+}
+
+void QuantizeInt8Uint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 2, 4 };
+    std::vector<int32_t> outputShape { 2, 4 };
+
+    std::vector<int8_t> inputValues
+    {
+        -1, 0, 1, 2,
+        -128, -127, 126, 127
+    };
+    std::vector<uint8_t> expectedOutputValues
+    {
+        0, 0, 1, 2,
+        0, 0, 126, 127
+    };
+
+    QuantizationTest<int8_t, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
+                                      ::tflite::TensorType_INT8,
+                                      ::tflite::TensorType_UINT8,
+                                      backends,
+                                      inputShape,
+                                      outputShape,
+                                      inputValues,
+                                      expectedOutputValues);
+}
+
+void QuantizeUint8Int8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 2, 4 };
+    std::vector<int32_t> outputShape { 2, 4 };
+
+    std::vector<uint8_t> inputValues
+    {
+        0, 1, 2, 3,
+        126, 127, 254, 255
+    };
+    std::vector<int8_t> expectedOutputValues
+    {
+        0, 1, 2, 3,
+        126, 127, 127, 127
+    };
+
+    QuantizationTest<uint8_t, int8_t>(tflite::BuiltinOperator_QUANTIZE,
+                                      ::tflite::TensorType_UINT8,
+                                      ::tflite::TensorType_INT8,
+                                      backends,
+                                      inputShape,
+                                      outputShape,
+                                      inputValues,
+                                      expectedOutputValues);
+}
+
+TEST_SUITE("CpuRef_QuantizationTests")
+{
+
+TEST_CASE ("DEQUANTIZE_UINT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DequantizeUint8Test(backends);
+}
+
+
+TEST_CASE ("DEQUANTIZE_INT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DequantizeInt8Test(backends);
+}
+
+
+TEST_CASE ("DEQUANTIZE_INT16_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DequantizeInt16Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeFloat32Uint8Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeFloat32Int8Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_FLOAT32_INT16_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeFloat32Int16Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_INT16_INT16_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeInt16Int16Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_INT16_INT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeInt16Int8Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_INT8_UINT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeInt8Uint8Test(backends);
+}
+
+
+TEST_CASE ("QUANTIZE_UINT8_INT8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    QuantizeUint8Int8Test(backends);
+}
+
+} // TEST_SUITE("CpuRef_QuantizationTests")
+
+TEST_SUITE("CpuAcc_QuantizationTests")
+{
+
+// Dequantize Operator Tests
+TEST_CASE ("DEQUANTIZE_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    DequantizeUint8Test(backends);
+}
+
+TEST_CASE ("DEQUANTIZE_INT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    DequantizeInt8Test(backends);
+}
+
+TEST_CASE ("DEQUANTIZE_INT16_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    DequantizeInt16Test(backends);
+}
+
+// Quantize Operator Tests
+TEST_CASE ("QUANTIZE_FLOAT32_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    QuantizeFloat32Uint8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_FLOAT32_INT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    QuantizeFloat32Int8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_INT8_UINT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    QuantizeInt8Uint8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_UINT8_INT8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    QuantizeUint8Int8Test(backends);
+}
+
+} // TEST_SUITE("CpuAcc_QuantizationTests")
+
+TEST_SUITE("GpuAcc_QuantizationTests")
+{
+
+// Dequantize Operator Tests
+TEST_CASE ("DEQUANTIZE_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    DequantizeUint8Test(backends);
+}
+
+TEST_CASE ("DEQUANTIZE_INT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    DequantizeInt8Test(backends);
+}
+
+TEST_CASE ("DEQUANTIZE_INT16_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    DequantizeInt16Test(backends);
+}
+
+// Quantize Operator Tests
+TEST_CASE ("QUANTIZE_FLOAT32_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    QuantizeFloat32Uint8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_FLOAT32_INT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    QuantizeFloat32Int8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_INT8_UINT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    QuantizeInt8Uint8Test(backends);
+}
+
+TEST_CASE ("QUANTIZE_UINT8_INT8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    QuantizeUint8Int8Test(backends);
+}
+
+} // TEST_SUITE("GpuAcc_QuantizationTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
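
The Quantize tests' expected values follow the affine quantization rule q = clamp(round(x / scale) + zeroPoint, qmin, qmax) with scale = 1 and zeroPoint = 0, which is why 256.f saturates to 255 in QuantizeFloat32Uint8Test and -128.5f to -128 in QuantizeFloat32Int8Test; the Dequantize tests apply the inverse, x = scale * (q - zeroPoint). A reference sketch of the forward rule (illustrative, not the delegate's implementation):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Affine quantization with the scale and zero point used by the tests.
    template <typename QuantT>
    QuantT RefQuantize(float x, float scale = 1.0f, int32_t zeroPoint = 0)
    {
        int32_t q = static_cast<int32_t>(std::round(x / scale)) + zeroPoint;
        q = std::max<int32_t>(q, std::numeric_limits<QuantT>::min());
        q = std::min<int32_t>(q, std::numeric_limits<QuantT>::max());
        return static_cast<QuantT>(q);
    }

    // RefQuantize<uint8_t>(256.f) == 255;  RefQuantize<int8_t>(-128.5f) == -128.
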
diff --git a/delegate/test/QuantizationTestHelper.hpp b/delegate/test/QuantizationTestHelper.hpp
new file mode 100644
index 0000000..af898f3
--- /dev/null
+++ b/delegate/test/QuantizationTestHelper.hpp
@@ -0,0 +1,200 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateQuantizationTfLiteModel(tflite::BuiltinOperator quantizationOperatorCode,
+                                                tflite::TensorType inputTensorType,
+                                                tflite::TensorType outputTensorType,
+                                                const std::vector <int32_t>& inputTensorShape,
+                                                const std::vector <int32_t>& outputTensorShape,
+                                                float quantScale = 1.0f,
+                                                int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
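+    // TfLite schema convention: buffer 0 is the empty sentinel; buffers 1 and 2
+    // back the input and output tensors created below.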
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
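+    // Per-tensor affine quantization: a single scale and zero-point pair shared
+    // by the input and output tensors.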
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }),
+                                         QuantizationDetails_CustomQuantization);
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              inputTensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              outputTensorType,
+                              2,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // Create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
+    flatbuffers::Offset<void> operatorBuiltinOptions = 0;
+    switch (quantizationOperatorCode)
+    {
+        case BuiltinOperator_QUANTIZE:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_QuantizeOptions;
+            operatorBuiltinOptions = CreateQuantizeOptions(flatBufferBuilder).Union();
+            break;
+        }
+        case BuiltinOperator_DEQUANTIZE:
+        {
+            operatorBuiltinOptionsType = BuiltinOptions_DequantizeOptions;
+            operatorBuiltinOptions = CreateDequantizeOptions(flatBufferBuilder).Union();
+            break;
+        }
+        default:
+            break;
+    }
+
+    const std::vector<int32_t> operatorInputs{0};
+    const std::vector<int32_t> operatorOutputs{1};
+    flatbuffers::Offset <Operator> quantizationOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{0};
+    const std::vector<int> subgraphOutputs{1};
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&quantizationOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: Quantization Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, quantizationOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename InputT, typename OutputT>
+void QuantizationTest(tflite::BuiltinOperator quantizeOperatorCode,
+                      tflite::TensorType inputTensorType,
+                      tflite::TensorType outputTensorType,
+                      std::vector<armnn::BackendId>& backends,
+                      std::vector<int32_t>& inputShape,
+                      std::vector<int32_t>& outputShape,
+                      std::vector<InputT>&  inputValues,
+                      std::vector<OutputT>& expectedOutputValues,
+                      float quantScale = 1.0f,
+                      int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateQuantizationTfLiteModel(quantizeOperatorCode,
+                                                                  inputTensorType,
+                                                                  outputTensorType,
+                                                                  inputShape,
+                                                                  outputShape,
+                                                                  quantScale,
+                                                                  quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<InputT>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<InputT>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<OutputT>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<OutputT>(armnnDelegateOutputId);
+
+    for (size_t i = 0; i < expectedOutputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
+        CHECK(tfLiteDelegateOutputData[i] == expectedOutputValues[i]);
+        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+    }
+}
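+
+// Illustrative call (hypothetical values; with quantScale defaulting to 1.0f and
+// quantOffset to 0, each float quantizes to the same integer value):
+//     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+//     std::vector<int32_t> shape { 2, 4 };
+//     std::vector<float>   inputValues    { 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f };
+//     std::vector<uint8_t> expectedValues { 0,    1,    2,    3,    4,    5,    6,    7 };
+//     QuantizationTest<float, uint8_t>(tflite::BuiltinOperator_QUANTIZE,
+//                                      ::tflite::TensorType_FLOAT32,
+//                                      ::tflite::TensorType_UINT8,
+//                                      backends, shape, shape,
+//                                      inputValues, expectedValues);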
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/RedefineTestHelper.hpp b/delegate/test/RedefineTestHelper.hpp
new file mode 100644
index 0000000..ce60db0
--- /dev/null
+++ b/delegate/test/RedefineTestHelper.hpp
@@ -0,0 +1,202 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateRedefineTfLiteModel(
+        tflite::BuiltinOperator redefineOperatorCode,
+        tflite::TensorType tensorType,
+        const std::vector<int32_t>& inputTensorShape,
+        const std::vector<int32_t>& outputTensorShape,
+        const std::vector<int32_t>& targetShape,
+        bool useOption = true,
+        float quantScale = 1.0f,
+        int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    auto inputTensor = CreateTensor(flatBufferBuilder,
+                                    flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                            inputTensorShape.size()),
+                                    tensorType,
+                                    1,
+                                    flatBufferBuilder.CreateString("input"),
+                                    quantizationParameters);
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors;
+    std::vector<int32_t> operatorInputs;
+    std::vector<int> subgraphInputs;
+    flatbuffers::Offset<void> operatorBuiltinOptions;
+
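+    // A RESHAPE can carry its target shape either in ReshapeOptions (useOption == true)
+    // or as a second, constant shape tensor (useOption == false); both encodings are
+    // exercised by the tests.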
+    if (useOption)
+    {
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        auto outputTensor = CreateTensor(flatBufferBuilder,
+                                         flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                                 outputTensorShape.size()),
+                                         tensorType,
+                                         2,
+                                         flatBufferBuilder.CreateString("output"),
+                                         quantizationParameters);
+        tensors = { inputTensor, outputTensor};
+        operatorInputs = {0};
+        subgraphInputs = {0};
+        operatorBuiltinOptions = CreateReshapeOptions(
+                flatBufferBuilder,
+                flatBufferBuilder.CreateVector(targetShape.data(), targetShape.size())).Union();
+    }
+    else
+    {
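+        // The target shape is supplied as a constant INT32 tensor whose raw bytes
+        // are stored in buffer 2.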
+        buffers.push_back(
+                CreateBuffer(flatBufferBuilder,
+                             flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(targetShape.data()),
+                                                            sizeof(int32_t) * targetShape.size())));
+        int32_t size = static_cast<int32_t>(targetShape.size());
+        auto shapeTensor = CreateTensor(flatBufferBuilder,
+                                        flatBufferBuilder.CreateVector<int32_t>( { size } ),
+                                        tflite::TensorType_INT32,
+                                        2,
+                                        flatBufferBuilder.CreateString("shape"));
+
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        auto outputTensor = CreateTensor(flatBufferBuilder,
+                                         flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                                 outputTensorShape.size()),
+                                         tensorType,
+                                         3,
+                                         flatBufferBuilder.CreateString("output"),
+                                         quantizationParameters);
+
+        tensors = { inputTensor, outputTensor, shapeTensor };
+        operatorInputs = {0, 2};
+        subgraphInputs = {0, 2};
+        operatorBuiltinOptions = CreateReshapeOptions(flatBufferBuilder).Union();
+    }
+
+    // create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_ReshapeOptions;
+
+    const std::vector<int32_t> operatorOutputs{1};
+    flatbuffers::Offset <Operator> redefineOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphOutputs{1};
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&redefineOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: Reshape Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         redefineOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void RedefineTest(tflite::BuiltinOperator redefineOperatorCode,
+                  tflite::TensorType tensorType,
+                  const std::vector<armnn::BackendId>& backends,
+                  const std::vector<int32_t>& inputShape,
+                  std::vector<int32_t>& outputShape,
+                  std::vector<T>& inputValues,
+                  std::vector<T>& expectedOutputValues,
+                  std::vector<int32_t>& targetShape,
+                  bool useOption = true,
+                  float quantScale = 1.0f,
+                  int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
+                                                              tensorType,
+                                                              inputShape,
+                                                              outputShape,
+                                                              targetShape,
+                                                              useOption,
+                                                              quantScale,
+                                                              quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ReduceTest.cpp b/delegate/test/ReduceTest.cpp
new file mode 100644
index 0000000..5c031d9
--- /dev/null
+++ b/delegate/test/ReduceTest.cpp
@@ -0,0 +1,423 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ReduceTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void ReduceUint8KeepDimsTest(tflite::BuiltinOperator reduceOperatorCode,
+                             std::vector<armnn::BackendId>& backends,
+                             std::vector<uint8_t>& expectedOutputValues)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 3 };
+    std::vector<int32_t> input1Shape { 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 1, 3 };
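+    // keepDims retains the reduced axis (axis 2) as a dimension of size 1.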
+
+    std::vector<uint8_t> input0Values { 1, 2, 3,
+                                        4, 3, 1  }; // Inputs
+    std::vector<int32_t> input1Values { 2 }; // Axis
+
+    ReduceTest<uint8_t>(reduceOperatorCode,
+                        ::tflite::TensorType_UINT8,
+                        backends,
+                        input0Shape,
+                        input1Shape,
+                        expectedOutputShape,
+                        input0Values,
+                        input1Values,
+                        expectedOutputValues,
+                        true);
+}
+
+void ReduceUint8Test(tflite::BuiltinOperator reduceOperatorCode,
+                     std::vector<armnn::BackendId>& backends,
+                     std::vector<uint8_t>& expectedOutputValues)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 3 };
+    std::vector<int32_t> input1Shape { 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 3 };
+
+    std::vector<uint8_t> input0Values { 1, 2, 3,
+                                        4, 3, 1 }; // Inputs
+    std::vector<int32_t> input1Values { 2 }; // Axis
+
+    ReduceTest<uint8_t>(reduceOperatorCode,
+                        ::tflite::TensorType_UINT8,
+                        backends,
+                        input0Shape,
+                        input1Shape,
+                        expectedOutputShape,
+                        input0Values,
+                        input1Values,
+                        expectedOutputValues,
+                        false);
+}
+
+void ReduceFp32KeepDimsTest(tflite::BuiltinOperator reduceOperatorCode,
+                            std::vector<armnn::BackendId>& backends,
+                            std::vector<float>& expectedOutputValues)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 3 };
+    std::vector<int32_t> input1Shape { 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 1, 3 };
+
+    std::vector<float>   input0Values { 1001.0f, 11.0f,   1003.0f,
+                                        10.0f,   1002.0f, 12.0f }; // Inputs
+    std::vector<int32_t> input1Values { 2 }; // Axis
+
+    ReduceTest<float>(reduceOperatorCode,
+                      ::tflite::TensorType_FLOAT32,
+                      backends,
+                      input0Shape,
+                      input1Shape,
+                      expectedOutputShape,
+                      input0Values,
+                      input1Values,
+                      expectedOutputValues,
+                      true);
+}
+
+void ReduceFp32Test(tflite::BuiltinOperator reduceOperatorCode,
+                    std::vector<armnn::BackendId>& backends,
+                    std::vector<float>& expectedOutputValues)
+{
+    std::vector<int32_t> input0Shape { 1, 1, 2, 3 };
+    std::vector<int32_t> input1Shape { 1 };
+    std::vector<int32_t> expectedOutputShape { 1, 1, 3 };
+
+    std::vector<float>   input0Values { 1001.0f, 11.0f,   1003.0f,
+                                        10.0f,   1002.0f, 12.0f }; // Inputs
+    std::vector<int32_t> input1Values { 2 }; // Axis
+
+    ReduceTest<float>(reduceOperatorCode,
+                      ::tflite::TensorType_FLOAT32,
+                      backends,
+                      input0Shape,
+                      input1Shape,
+                      expectedOutputShape,
+                      input0Values,
+                      input1Values,
+                      expectedOutputValues,
+                      false);
+}
+
+// REDUCE_MAX Tests
+TEST_SUITE("ReduceMax_CpuRefTests")
+{
+
+TEST_CASE ("ReduceMax_Uint8_KeepDims_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
+    ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
+                            backends,
+                            expectedOutputValues);
+}
+
+TEST_CASE ("ReduceMax_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
+    ReduceUint8Test(tflite::BuiltinOperator_REDUCE_MAX,
+                    backends,
+                    expectedOutputValues);
+}
+
+TEST_CASE ("ReduceMax_Fp32_KeepDims_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    std::vector<float>   expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
+    ReduceFp32KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
+                           backends,
+                           expectedOutputValues);
+}
+
+TEST_CASE ("ReduceMax_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    std::vector<float>   expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
+    ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MAX,
+                   backends,
+                   expectedOutputValues);
+}
+
+} // End of ReduceMax_CpuRefTests
+
+TEST_SUITE("ReduceMax_CpuAccTests")
+{
+
+TEST_CASE ("ReduceMax_Uint8_KeepDims_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
+    ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
+                            backends,
+                            expectedOutputValues);
+}
+
+TEST_CASE ("ReduceMax_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
+    ReduceUint8Test(tflite::BuiltinOperator_REDUCE_MAX,
+                    backends,
+                    expectedOutputValues);
+}
+
+TEST_CASE ("ReduceMax_Fp32_KeepDims_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    std::vector<float>   expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
+    ReduceFp32KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
+                           backends,
+                           expectedOutputValues);
+}
+
+TEST_CASE ("ReduceMax_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    std::vector<float>   expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
+    ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MAX,
+                   backends,
+                   expectedOutputValues);
+}
+
+} // End of ReduceMax_CpuAccTests
+
+TEST_SUITE("ReduceMax_GpuAccTests")
+{
+
+TEST_CASE ("ReduceMax_Uint8_KeepDims_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
+    ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
+                            backends,
+                            expectedOutputValues);
+}
+
+TEST_CASE ("ReduceMax_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    std::vector<uint8_t> expectedOutputValues { 4, 3, 3 };
+    ReduceUint8Test(tflite::BuiltinOperator_REDUCE_MAX,
+                    backends,
+                    expectedOutputValues);
+}
+
+TEST_CASE ("ReduceMax_Fp32_KeepDims_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    std::vector<float>   expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
+    ReduceFp32KeepDimsTest(tflite::BuiltinOperator_REDUCE_MAX,
+                           backends,
+                           expectedOutputValues);
+}
+
+TEST_CASE ("ReduceMax_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    std::vector<float>   expectedOutputValues { 1001.0f, 1002.0f, 1003.0f };
+    ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MAX,
+                   backends,
+                   expectedOutputValues);
+}
+
+} // End of ReduceMax_GpuAccTests
+
+// REDUCE_MIN Tests
+TEST_SUITE("ReduceMin_CpuRefTests")
+{
+
+TEST_CASE ("ReduceMin_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    std::vector<float>   expectedOutputValues { 10.0f, 11.0f, 12.0f };
+    ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MIN,
+                   backends,
+                   expectedOutputValues);
+}
+
+} // End of ReduceMin_CpuRefTests
+
+TEST_SUITE("ReduceMin_CpuAccTests")
+{
+
+TEST_CASE ("ReduceMin_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    std::vector<float>   expectedOutputValues { 10.0f, 11.0f, 12.0f };
+    ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MIN,
+                   backends,
+                   expectedOutputValues);
+}
+
+} // End of ReduceMin_CpuAccTests
+
+TEST_SUITE("ReduceMin_GpuAccTests")
+{
+
+TEST_CASE ("ReduceMin_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    std::vector<float>   expectedOutputValues { 10.0f, 11.0f, 12.0f };
+    ReduceFp32Test(tflite::BuiltinOperator_REDUCE_MIN,
+                   backends,
+                   expectedOutputValues);
+}
+
+} // End of ReduceMin_GpuAccTests
+
+// SUM Tests
+TEST_SUITE("Sum_CpuRefTests")
+{
+
+TEST_CASE ("Sum_Uint8_KeepDims_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    std::vector<uint8_t> expectedOutputValues { 5, 5, 4 };
+    ReduceUint8KeepDimsTest(tflite::BuiltinOperator_SUM,
+                            backends,
+                            expectedOutputValues);
+}
+
+TEST_CASE ("Sum_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    std::vector<float>   expectedOutputValues { 1011.0f, 1013.0f, 1015.0f };
+    ReduceFp32Test(tflite::BuiltinOperator_SUM,
+                   backends,
+                   expectedOutputValues);
+}
+
+} // End of Sum_CpuRefTests
+
+TEST_SUITE("Sum_CpuAccTests")
+{
+
+TEST_CASE ("Sum_Uint8_KeepDims_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    std::vector<uint8_t> expectedOutputValues { 5, 5, 4 };
+    ReduceUint8KeepDimsTest(tflite::BuiltinOperator_SUM,
+                            backends,
+                            expectedOutputValues);
+}
+
+TEST_CASE ("Sum_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    std::vector<float>   expectedOutputValues { 1011.0f, 1013.0f, 1015.0f };
+    ReduceFp32Test(tflite::BuiltinOperator_SUM,
+                   backends,
+                   expectedOutputValues);
+}
+
+} // End of Sum_CpuAccTests
+
+TEST_SUITE("Sum_GpuAccTests")
+{
+
+TEST_CASE ("Sum_Uint8_KeepDims_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    std::vector<uint8_t> expectedOutputValues { 5, 5, 4 };
+    ReduceUint8KeepDimsTest(tflite::BuiltinOperator_SUM,
+                            backends,
+                            expectedOutputValues);
+}
+
+TEST_CASE ("Sum_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    std::vector<float>   expectedOutputValues { 1011.0f, 1013.0f, 1015.0f };
+    ReduceFp32Test(tflite::BuiltinOperator_SUM,
+                   backends,
+                   expectedOutputValues);
+}
+
+} // End of Sum_GpuAccTests
+
+// PROD Tests
+TEST_SUITE("Prod_CpuRefTests")
+{
+
+TEST_CASE ("Prod_Uint8_KeepDims_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    std::vector<uint8_t> expectedOutputValues { 4, 6, 3 };
+    ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_PROD,
+                            backends,
+                            expectedOutputValues);
+}
+
+TEST_CASE ("Prod_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    std::vector<float>   expectedOutputValues { 10010.0f, 11022.0f, 12036.0f };
+    ReduceFp32Test(tflite::BuiltinOperator_REDUCE_PROD,
+                   backends,
+                   expectedOutputValues);
+}
+
+} // End of Prod_CpuRefTests
+
+TEST_SUITE("Prod_CpuAccTests")
+{
+
+TEST_CASE ("Prod_Uint8_KeepDims_CpuAcc_Test" )
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    std::vector<uint8_t> expectedOutputValues { 4, 6, 3 };
+    ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_PROD,
+                            backends,
+                            expectedOutputValues);
+}
+
+TEST_CASE ("Prod_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    std::vector<float>   expectedOutputValues { 10010.0f, 11022.0f, 12036.0f };
+    ReduceFp32Test(tflite::BuiltinOperator_REDUCE_PROD,
+                   backends,
+                   expectedOutputValues);
+}
+
+} // End of Prod_CpuAccTests
+
+TEST_SUITE("Prod_GpuAccTests")
+{
+
+TEST_CASE ("Prod_Uint8_KeepDims_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    std::vector<uint8_t> expectedOutputValues { 4, 6, 3 };
+    ReduceUint8KeepDimsTest(tflite::BuiltinOperator_REDUCE_PROD,
+                            backends,
+                            expectedOutputValues);
+}
+
+TEST_CASE ("Prod_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    std::vector<float>   expectedOutputValues { 10010.0f, 11022.0f, 12036.0f };
+    ReduceFp32Test(tflite::BuiltinOperator_REDUCE_PROD,
+                   backends,
+                   expectedOutputValues);
+}
+
+} // End of Prod_GpuAccTests
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ReduceTestHelper.hpp b/delegate/test/ReduceTestHelper.hpp
new file mode 100644
index 0000000..fedf7ee
--- /dev/null
+++ b/delegate/test/ReduceTestHelper.hpp
@@ -0,0 +1,228 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+#include <string>
+
+namespace
+{
+
+std::vector<char> CreateReduceTfLiteModel(tflite::BuiltinOperator reduceOperatorCode,
+                                          tflite::TensorType tensorType,
+                                          std::vector<int32_t>& input0TensorShape,
+                                          std::vector<int32_t>& input1TensorShape,
+                                          const std::vector <int32_t>& outputTensorShape,
+                                          std::vector<int32_t>& axisData,
+                                          const bool keepDims,
+                                          float quantScale = 1.0f,
+                                          int quantOffset  = 0,
+                                          bool kTfLiteNoQuantizationForQuantized = false)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    flatbuffers::Offset<tflite::Buffer> buffers[4] = {
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
+                                                        sizeof(int32_t) * axisData.size())),
+            CreateBuffer(flatBufferBuilder)
+    };
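+    // Buffer 0 is the empty sentinel; buffer 2 holds the constant axis data read
+    // by the reduce operator.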
+
+    flatbuffers::Offset<tflite::QuantizationParameters> quantizationParametersAxis
+            = CreateQuantizationParameters(flatBufferBuilder);
+
+    flatbuffers::Offset<tflite::QuantizationParameters> quantizationParameters;
+
+    if (kTfLiteNoQuantizationForQuantized)
+    {
+        if ((quantScale == 1 || quantScale == 0) && quantOffset == 0)
+        {
+            // Creates quantization parameter with quantization.type = kTfLiteNoQuantization
+            quantizationParameters = CreateQuantizationParameters(flatBufferBuilder);
+        }
+        else
+        {
+            // Creates quantization parameter with quantization.type != kTfLiteNoQuantization
+            quantizationParameters = CreateQuantizationParameters(
+                    flatBufferBuilder,
+                    0,
+                    0,
+                    flatBufferBuilder.CreateVector<float>({quantScale}),
+                    flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
+        }
+    }
+    else
+    {
+        quantizationParameters = CreateQuantizationParameters(
+                flatBufferBuilder,
+                0,
+                0,
+                flatBufferBuilder.CreateVector<float>({quantScale}),
+                flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
+    }
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
+                                                                      input0TensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
+                                                                      input1TensorShape.size()),
+                              ::tflite::TensorType_INT32,
+                              2,
+                              flatBufferBuilder.CreateString("axis"),
+                              quantizationParametersAxis);
+
+    // Create output tensor
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              3,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // Create operator. The reduce operations MIN, MAX, SUM, MEAN and PROD use ReducerOptions.
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_ReducerOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateReducerOptions(flatBufferBuilder, keepDims).Union();
+
+    const std::vector<int> operatorInputs{ 0, 1 };
+    const std::vector<int> operatorOutputs{ 2 };
+    flatbuffers::Offset <Operator> reduceOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{ 0, 1 };
+    const std::vector<int> subgraphOutputs{ 2 };
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&reduceOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: Reduce Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, reduceOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers, 4));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void ReduceTest(tflite::BuiltinOperator reduceOperatorCode,
+                tflite::TensorType tensorType,
+                std::vector<armnn::BackendId>& backends,
+                std::vector<int32_t>& input0Shape,
+                std::vector<int32_t>& input1Shape,
+                std::vector<int32_t>& expectedOutputShape,
+                std::vector<T>& input0Values,
+                std::vector<int32_t>& input1Values,
+                std::vector<T>& expectedOutputValues,
+                const bool keepDims,
+                float quantScale = 1.0f,
+                int quantOffset  = 0)
+{
+    using namespace tflite;
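+    // Two models are built from identical data: the ArmNN-delegated run keeps explicit
+    // quantization parameters, while the TFLite reference run maps trivial parameters
+    // (scale 1 or 0, offset 0) to kTfLiteNoQuantization.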
+    std::vector<char> modelBufferArmNN = CreateReduceTfLiteModel(reduceOperatorCode,
+                                                                 tensorType,
+                                                                 input0Shape,
+                                                                 input1Shape,
+                                                                 expectedOutputShape,
+                                                                 input1Values,
+                                                                 keepDims,
+                                                                 quantScale,
+                                                                 quantOffset,
+                                                                 false);
+    std::vector<char> modelBufferTFLite = CreateReduceTfLiteModel(reduceOperatorCode,
+                                                                  tensorType,
+                                                                  input0Shape,
+                                                                  input1Shape,
+                                                                  expectedOutputShape,
+                                                                  input1Values,
+                                                                  keepDims,
+                                                                  quantScale,
+                                                                  quantOffset,
+                                                                  true);
+
+    const Model* tfLiteModelArmNN = GetModel(modelBufferArmNN.data());
+    const Model* tfLiteModelTFLite = GetModel(modelBufferTFLite.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModelArmNN, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModelTFLite, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                        armnnDelegateInterpreter,
+                                        expectedOutputShape,
+                                        expectedOutputValues);
+
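+    // Tear down the delegated interpreter while theArmnnDelegate is still in scope.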
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ReshapeTest.cpp b/delegate/test/ReshapeTest.cpp
new file mode 100644
index 0000000..c3df8b2
--- /dev/null
+++ b/delegate/test/ReshapeTest.cpp
@@ -0,0 +1,517 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RedefineTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+#include <half/half.hpp>
+
+using Half = half_float::half;
+
+namespace armnnDelegate
+{
+
+void ReshapeSimpleTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 3, 2, 2 };
+    std::vector<int32_t> targetShape { 1, 3, 2, 2 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                                8.0f, 12.0f, -15.0f, 2.0f,
+                                                3.0f, -4.0f, -1.0f, -11.0f };
+
+    RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
+                        ::tflite::TensorType_FLOAT32,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        targetShape,
+                        useOption);
+}
+
+using namespace half_float::literal;
+
+void ReshapeSimpleFloat16Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 3, 2, 2 };
+    std::vector<int32_t> targetShape { 1, 3, 2, 2 };
+
+    std::vector<Half> inputValues = { 5._h, -8._h, -10._h, 7._h,
+                                      8._h, 12._h, -15._h, 2._h,
+                                      3._h, -4._h, -1._h, -11._h };
+
+    std::vector<Half> expectedOutputValues = { 5._h, -8._h, -10._h, 7._h,
+                                               8._h, 12._h, -15._h, 2._h,
+                                               3._h, -4._h, -1._h, -11._h };
+
+    RedefineTest<Half>(tflite::BuiltinOperator_RESHAPE,
+                        ::tflite::TensorType_FLOAT16,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        targetShape,
+                        useOption);
+}
+
+void ReshapeReduceDimTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 1, 4, 3 };
+    std::vector<int32_t> targetShape { 1, 4, 3 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                                8.0f, 12.0f, -15.0f, 2.0f,
+                                                3.0f, -4.0f, -1.0f, -11.0f };
+
+    RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
+                        ::tflite::TensorType_FLOAT32,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        targetShape,
+                        useOption);
+}
+
+void ReshapeFlattenTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 6, 2 };
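+    // A target dimension of -1 tells TfLite to infer it from the element count (12 / 2 = 6 here).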
+    std::vector<int32_t> targetShape { -1, 2 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                                8.0f, 12.0f, -15.0f, 2.0f,
+                                                3.0f, -4.0f, -1.0f, -11.0f };
+
+    RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
+                        ::tflite::TensorType_FLOAT32,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        targetShape,
+                        useOption);
+}
+
+void ReshapeFlattenAllTest(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 12 };
+    std::vector<int32_t> targetShape { -1 };
+
+    std::vector<float> inputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                       8.0f, 12.0f, -15.0f, 2.0f,
+                                       3.0f, -4.0f, -1.0f, -11.0f };
+
+    std::vector<float> expectedOutputValues = { -5.0f, 8.0f, -10.0f, 7.0f,
+                                                8.0f, 12.0f, -15.0f, 2.0f,
+                                                3.0f, -4.0f, -1.0f, -11.0f };
+
+    RedefineTest<float>(tflite::BuiltinOperator_RESHAPE,
+                        ::tflite::TensorType_FLOAT32,
+                        backends,
+                        inputShape,
+                        outputShape,
+                        inputValues,
+                        expectedOutputValues,
+                        targetShape,
+                        useOption);
+}
+
+void ReshapeInt8Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 6, 2 };
+    std::vector<int32_t> targetShape { -1, 2 };
+
+    std::vector<int8_t> inputValues = { -5, 8, -10, 7,
+                                        8, 12, -15, 2,
+                                        3, -4, -1, -11 };
+
+    std::vector<int8_t> expectedOutputValues = { -5, 8, -10, 7,
+                                                 8, 12, -15, 2,
+                                                 3, -4, -1, -11 };
+
+    RedefineTest<int8_t>(tflite::BuiltinOperator_RESHAPE,
+                         ::tflite::TensorType_INT8,
+                         backends,
+                         inputShape,
+                         outputShape,
+                         inputValues,
+                         expectedOutputValues,
+                         targetShape,
+                         useOption,
+                         2.5f,
+                         1);
+}
+
+void ReshapeUint8Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 6, 2 };
+    std::vector<int32_t> targetShape { -1, 2 };
+
+    std::vector<uint8_t> inputValues = { 5, 8, 10, 7,
+                                         8, 12, 15, 2,
+                                         3, 4, 1, 11 };
+
+    std::vector<uint8_t> expectedOutputValues = { 5, 8, 10, 7,
+                                                  8, 12, 15, 2,
+                                                  3, 4, 1, 11 };
+
+    RedefineTest<uint8_t>(tflite::BuiltinOperator_RESHAPE,
+                          ::tflite::TensorType_UINT8,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          targetShape,
+                          useOption,
+                          2.5f,
+                          1);
+}
+
+void ReshapeInt16Test(std::vector<armnn::BackendId>& backends, bool useOption = true)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 3, 4, 1 };
+    std::vector<int32_t> outputShape { 6, 2 };
+    std::vector<int32_t> targetShape { -1, 2 };
+
+    std::vector<int16_t> inputValues = { -5, 8, -10, 7,
+                                         8, 12, -15, 2,
+                                         3, -4, -1, -11 };
+
+    std::vector<int16_t> expectedOutputValues = { -5, 8, -10, 7,
+                                                  8, 12, -15, 2,
+                                                  3, -4, -1, -11 };
+
+    RedefineTest<int16_t>(tflite::BuiltinOperator_RESHAPE,
+                          ::tflite::TensorType_INT16,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          targetShape,
+                          useOption,
+                          2.5f,
+                          0);
+}
+
+TEST_SUITE("Reshape_GpuAccTests")
+{
+
+TEST_CASE ("Reshape_Simple_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeSimpleTest(backends);
+}
+
+TEST_CASE ("Reshape_ReduceDimension_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeReduceDimTest(backends);
+}
+
+TEST_CASE ("Reshape_Flatten_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeFlattenTest(backends);
+}
+
+TEST_CASE ("Reshape_FlattenAll_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeFlattenAllTest(backends);
+}
+
+TEST_CASE ("Reshape_Int8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeInt8Test(backends);
+}
+
+TEST_CASE ("Reshape_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeUint8Test(backends);
+}
+
+TEST_CASE ("Reshape_Float16_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeSimpleFloat16Test(backends);
+}
+
+TEST_CASE ("Reshape_Simple_ShapeTensor_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeSimpleTest(backends, false);
+}
+
+TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeReduceDimTest(backends, false);
+}
+
+TEST_CASE ("Reshape_Flatten_ShapeTensor_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeFlattenTest(backends, false);
+}
+
+TEST_CASE ("Reshape_FlattenAll_ShapeTensor_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeFlattenAllTest(backends, false);
+}
+
+TEST_CASE ("Reshape_Int8_ShapeTensor_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeInt8Test(backends, false);
+}
+
+TEST_CASE ("Reshape_Uint8_ShapeTensor_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeUint8Test(backends, false);
+}
+
+TEST_CASE ("Reshape_Float16_ShapeTensor_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ReshapeSimpleFloat16Test(backends, false);
+}
+
+} // TEST_SUITE("Reshape_GpuAccTests")
+
+TEST_SUITE("Reshape_CpuAccTests")
+{
+
+TEST_CASE ("Reshape_Simple_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeSimpleTest(backends);
+}
+
+TEST_CASE ("Reshape_ReduceDimension_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeReduceDimTest(backends);
+}
+
+TEST_CASE ("Reshape_Flatten_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeFlattenTest(backends);
+}
+
+TEST_CASE ("Reshape_FlattenAll_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeFlattenAllTest(backends);
+}
+
+TEST_CASE ("Reshape_Int8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeInt8Test(backends);
+}
+
+TEST_CASE ("Reshape_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeUint8Test(backends);
+}
+
+TEST_CASE ("Reshape_Float16_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeSimpleFloat16Test(backends);
+}
+
+TEST_CASE ("Reshape_Simple_ShapeTensor_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeSimpleTest(backends, false);
+}
+
+TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeReduceDimTest(backends, false);
+}
+
+TEST_CASE ("Reshape_Flatten_ShapeTensor_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeFlattenTest(backends, false);
+}
+
+TEST_CASE ("Reshape_FlattenAll_ShapeTensor_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeFlattenAllTest(backends, false);
+}
+
+TEST_CASE ("Reshape_Int8_ShapeTensor_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeInt8Test(backends, false);
+}
+
+TEST_CASE ("Reshape_Uint8_ShapeTensor_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeUint8Test(backends, false);
+}
+
+TEST_CASE ("Reshape_Float16_ShapeTensor_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ReshapeSimpleFloat16Test(backends, false);
+}
+
+} // TEST_SUITE("Reshape_CpuAccTests")
+
+TEST_SUITE("Reshape_CpuRefTests")
+{
+
+TEST_CASE ("Reshape_Simple_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeSimpleTest(backends);
+}
+
+TEST_CASE ("Reshape_ReduceDimension_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeReduceDimTest(backends);
+}
+
+TEST_CASE ("Reshape_Flatten_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeFlattenTest(backends);
+}
+
+TEST_CASE ("Reshape_FlattenAll_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeFlattenAllTest(backends);
+}
+
+TEST_CASE ("Reshape_Int8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeInt8Test(backends);
+}
+
+TEST_CASE ("Reshape_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeUint8Test(backends);
+}
+
+TEST_CASE ("Reshape_Int16_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeInt16Test(backends);
+}
+
+TEST_CASE ("Reshape_Float16_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeSimpleFloat16Test(backends);
+}
+
+TEST_CASE ("Reshape_Simple_ShapeTensor_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeSimpleTest(backends, false);
+}
+
+TEST_CASE ("Reshape_ReduceDimension_ShapeTensor_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeReduceDimTest(backends, false);
+}
+
+TEST_CASE ("Reshape_Flatten_ShapeTensor_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeFlattenTest(backends, false);
+}
+
+TEST_CASE ("Reshape_FlattenAll_ShapeTensor_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeFlattenAllTest(backends, false);
+}
+
+TEST_CASE ("Reshape_Int8_ShapeTensor_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeInt8Test(backends, false);
+}
+
+TEST_CASE ("Reshape_Uint8_ShapeTensor_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeUint8Test(backends, false);
+}
+
+TEST_CASE ("Reshape_Int16_ShapeTensor_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeInt16Test(backends, false);
+}
+
+TEST_CASE ("Reshape_Float16_ShapeTensor_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ReshapeSimpleFloat16Test(backends, false);
+}
+
+} // TEST_SUITE("Reshape_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ResizeTest.cpp b/delegate/test/ResizeTest.cpp
new file mode 100644
index 0000000..2011387
--- /dev/null
+++ b/delegate/test/ResizeTest.cpp
@@ -0,0 +1,134 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ResizeTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void ResizeBilinearFloat32Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<float> input1Values
+        {
+            0.0f, 1.0f, 2.0f,
+            3.0f, 4.0f, 5.0f,
+            6.0f, 7.0f, 8.0f
+        };
+    const std::vector<int32_t> input2NewShape { 5, 5 };
+
+    // Calculate output data
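+    // The helper builds the model with align_corners and half_pixel_centers
+    // both false, so each output coordinate maps back as inIdx = outIdx * 3/5.
+    // For example, output[0][1] samples x = 0.6 between 0.0f and 1.0f,
+    // giving 0.0f * 0.4f + 1.0f * 0.6f = 0.6f.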
+    std::vector<float> expectedOutputValues
+        {
+            0.0f, 0.6f, 1.2f, 1.8f, 2.0f,
+            1.8f, 2.4f, 3.0f, 3.6f, 3.8f,
+            3.6f, 4.2f, 4.8f, 5.4f, 5.6f,
+            5.4f, 6.0f, 6.6f, 7.2f, 7.4f,
+            6.0f, 6.6f, 7.2f, 7.8f, 8.0f
+        };
+
+    const std::vector<int32_t> input1Shape { 1, 3, 3, 1 };
+    const std::vector<int32_t> input2Shape { 2 };
+    const std::vector<int32_t> expectedOutputShape = input2NewShape;
+
+    ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_BILINEAR,
+                       backends,
+                       input1Values,
+                       input1Shape,
+                       input2NewShape,
+                       input2Shape,
+                       expectedOutputValues,
+                       expectedOutputShape);
+}
+
+void ResizeNearestNeighbourFloat32Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set input data
+    std::vector<float> input1Values { 1.0f, 2.0f, 3.0f, 4.0f };
+    const std::vector<int32_t> input2NewShape { 1, 1 };
+
+    // Calculate output data
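+    // With align_corners and half_pixel_centers both false, the single
+    // output index maps back to floor(0 * 2/1) = 0, i.e. the first input.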
+    std::vector<float> expectedOutputValues { 1.0f };
+
+    const std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
+    const std::vector<int32_t> input2Shape { 2 };
+    const std::vector<int32_t> expectedOutputShape = input2NewShape;
+
+    ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
+                       backends,
+                       input1Values,
+                       input1Shape,
+                       input2NewShape,
+                       input2Shape,
+                       expectedOutputValues,
+                       expectedOutputShape);
+}
+
+TEST_SUITE("ResizeTests_GpuAccTests")
+{
+
+TEST_CASE ("Resize_Biliniar_Float32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ResizeBiliniarFloat32Test(backends);
+}
+
+TEST_CASE ("Resize_NearestNeighbour_Float32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    ResizeNearestNeighbourFloat32Test(backends);
+}
+
+} // TEST_SUITE("ResizeTests_GpuAccTests")
+
+
+TEST_SUITE("ResizeTests_CpuAccTests")
+{
+
+TEST_CASE ("Resize_Biliniar_Float32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ResizeBiliniarFloat32Test(backends);
+}
+
+TEST_CASE ("Resize_NearestNeighbour_Float32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    ResizeNearestNeighbourFloat32Test(backends);
+}
+
+} // TEST_SUITE("ResizeTests_CpuAccTests")
+
+
+TEST_SUITE("ResizeTests_CpuRefTests")
+{
+
+TEST_CASE ("Resize_Biliniar_Float32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ResizeBiliniarFloat32Test(backends);
+}
+
+TEST_CASE ("Resize_NearestNeighbour_Float32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ResizeNearestNeighbourFloat32Test(backends);
+}
+
+} // TEST_SUITE("ResizeTests_CpuRefTests")
+
+} // namespace armnnDelegate
diff --git a/delegate/test/ResizeTestHelper.hpp b/delegate/test/ResizeTestHelper.hpp
new file mode 100644
index 0000000..ab7de14
--- /dev/null
+++ b/delegate/test/ResizeTestHelper.hpp
@@ -0,0 +1,194 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+
+std::vector<char> CreateResizeTfLiteModel(tflite::BuiltinOperator operatorCode,
+                                          tflite::TensorType inputTensorType,
+                                          const std::vector <int32_t>& inputTensorShape,
+                                          const std::vector <int32_t>& sizeTensorData,
+                                          const std::vector <int32_t>& sizeTensorShape,
+                                          const std::vector <int32_t>& outputTensorShape)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
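+    // Buffer 0 is the TfLite empty-buffer sentinel; buffers 1 and 3 back the
+    // runtime input and output tensors, while buffer 2 carries the constant
+    // size data for the resize.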
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector(
+                                           reinterpret_cast<const uint8_t*>(sizeTensorData.data()),
+                                           sizeof(int32_t) * sizeTensorData.size())));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(), inputTensorShape.size()),
+                              inputTensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input_tensor"));
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(sizeTensorShape.data(),
+                                                                      sizeTensorShape.size()),
+                              TensorType_INT32,
+                              2,
+                              flatBufferBuilder.CreateString("size_input_tensor"));
+
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              inputTensorType,
+                              3,
+                              flatBufferBuilder.CreateString("output_tensor"));
+
+    // Create Operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_NONE;
+    flatbuffers::Offset<void> operatorBuiltinOption = 0;
+    switch (operatorCode)
+    {
+        case BuiltinOperator_RESIZE_BILINEAR:
+        {
+            operatorBuiltinOption = CreateResizeBilinearOptions(flatBufferBuilder, false, false).Union();
+            operatorBuiltinOptionsType = tflite::BuiltinOptions_ResizeBilinearOptions;
+            break;
+        }
+        case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR:
+        {
+            operatorBuiltinOption = CreateResizeNearestNeighborOptions(flatBufferBuilder, false, false).Union();
+            operatorBuiltinOptionsType = tflite::BuiltinOptions_ResizeNearestNeighborOptions;
+            break;
+        }
+        default:
+            break;
+    }
+
+    const std::vector<int> operatorInputs{0, 1};
+    const std::vector<int> operatorOutputs{2};
+    flatbuffers::Offset <Operator> resizeOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOption);
+
+    const std::vector<int> subgraphInputs{0, 1};
+    const std::vector<int> subgraphOutputs{2};
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&resizeOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Resize Biliniar Operator Model");
+    flatbuffers::Offset <OperatorCode> opCode = CreateOperatorCode(flatBufferBuilder, operatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&opCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void ResizeFP32TestImpl(tflite::BuiltinOperator operatorCode,
+                        std::vector<armnn::BackendId>& backends,
+                        std::vector<float>& input1Values,
+                        std::vector<int32_t> input1Shape,
+                        std::vector<int32_t> input2NewShape,
+                        std::vector<int32_t> input2Shape,
+                        std::vector<float>& expectedOutputValues,
+                        std::vector<int32_t> expectedOutputShape)
+{
+    using namespace tflite;
+
+    std::vector<char> modelBuffer = CreateResizeTfLiteModel(operatorCode,
+                                                            ::tflite::TensorType_FLOAT32,
+                                                            input1Shape,
+                                                            input2NewShape,
+                                                            input2Shape,
+                                                            expectedOutputShape);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // The model is executed twice, once with the reference TfLite runtime and
+    // once with the Arm NN delegate, so that the two outputs can be compared.
+
+    // Create TfLite Interpreter with armnn delegate
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create TfLite Interpreter without armnn delegate
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data for the armnn interpreter
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input1Values);
+    armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input2NewShape);
+
+    // Set input data for the tflite interpreter
+    armnnDelegate::FillInput(tfLiteInterpreter, 0, input1Values);
+    armnnDelegate::FillInput(tfLiteInterpreter, 1, input2NewShape);
+
+    // Run inference on both interpreters
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    for (size_t i = 0; i < expectedOutputValues.size(); i++)
+    {
+        CHECK(expectedOutputValues[i] == doctest::Approx(armnnDelegateOutputData[i]));
+        CHECK(armnnDelegateOutputData[i] == doctest::Approx(tfLiteDelegateOutputData[i]));
+    }
+
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/RoundTest.cpp b/delegate/test/RoundTest.cpp
new file mode 100644
index 0000000..b4f0446
--- /dev/null
+++ b/delegate/test/RoundTest.cpp
@@ -0,0 +1,72 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RoundTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void FloorFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  {1, 3, 2, 3};
+    std::vector<int32_t> outputShape {1, 3, 2, 3};
+
+    std::vector<float> inputValues { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+                                     1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f };
+
+    std::vector<float> expectedOutputValues { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
+                                              1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f };
+
+    RoundTest<float>(tflite::BuiltinOperator_FLOOR,
+                     ::tflite::TensorType_FLOAT32,
+                     backends,
+                     inputShape,
+                     inputValues,
+                     expectedOutputValues);
+}
+
+// FLOOR Test Suite
+TEST_SUITE("FLOOR_CpuRefTests")
+{
+
+TEST_CASE ("FLOOR_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    FloorFp32Test(backends);
+}
+
+} // TEST_SUITE("FLOOR_CpuRefTests")
+
+TEST_SUITE("FLOOR_CpuAccTests")
+{
+
+TEST_CASE ("FLOOR_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    FloorFp32Test(backends);
+}
+
+} // TEST_SUITE("FLOOR_CpuAccTests")
+
+TEST_SUITE("FLOOR_GpuAccTests")
+{
+
+TEST_CASE ("FLOOR_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    FloorFp32Test(backends);
+}
+
+} // TEST_SUITE("FLOOR_GpuAccTests")
+// End of FLOOR Test Suite
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/RoundTestHelper.hpp b/delegate/test/RoundTestHelper.hpp
new file mode 100644
index 0000000..dc14abf
--- /dev/null
+++ b/delegate/test/RoundTestHelper.hpp
@@ -0,0 +1,163 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateRoundTfLiteModel(tflite::BuiltinOperator roundOperatorCode,
+                                         tflite::TensorType tensorType,
+                                         const std::vector <int32_t>& tensorShape,
+                                         float quantScale = 1.0f,
+                                         int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
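+    // TfLite affine quantization follows real = quantScale * (q - quantOffset);
+    // the defaults (scale 1.0f, offset 0) leave the data unscaled.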
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({quantScale}),
+                                     flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    const std::vector<int32_t> operatorInputs({0});
+    const std::vector<int32_t> operatorOutputs({1});
+
+    flatbuffers::Offset<Operator> roundOperator;
+    flatbuffers::Offset<flatbuffers::String> modelDescription;
+    flatbuffers::Offset<OperatorCode> operatorCode;
+
+    switch (roundOperatorCode)
+    {
+        case tflite::BuiltinOperator_FLOOR:
+        default:
+            roundOperator =
+                CreateOperator(flatBufferBuilder,
+                               0,
+                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()));
+                modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Floor Operator Model");
+                operatorCode = CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_FLOOR);
+            break;
+    }
+    const std::vector<int32_t> subgraphInputs({0});
+    const std::vector<int32_t> subgraphOutputs({1});
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&roundOperator, 1));
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template<typename T>
+void RoundTest(tflite::BuiltinOperator roundOperatorCode,
+               tflite::TensorType tensorType,
+               std::vector<armnn::BackendId>& backends,
+               std::vector<int32_t>& shape,
+               std::vector<T>& inputValues,
+               std::vector<T>& expectedOutputValues,
+               float quantScale = 1.0f,
+               int quantOffset = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateRoundTfLiteModel(roundOperatorCode,
+                                                           tensorType,
+                                                           shape,
+                                                           quantScale,
+                                                           quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegate;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegate) == kTfLiteOk);
+    CHECK(armnnDelegate != nullptr);
+    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteDelegate;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteDelegate) == kTfLiteOk);
+    CHECK(tfLiteDelegate != nullptr);
+    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify the interpreter to use the Arm NN delegate
+    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
+
+    // Run inference on both interpreters
+    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
+                                        armnnDelegate,
+                                        shape,
+                                        expectedOutputValues,
+                                        0);
+
+    tfLiteDelegate.reset(nullptr);
+    armnnDelegate.reset(nullptr);
+}
+
+} // anonymous namespace
diff --git a/delegate/test/ShapeTest.cpp b/delegate/test/ShapeTest.cpp
new file mode 100644
index 0000000..309b071
--- /dev/null
+++ b/delegate/test/ShapeTest.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ShapeTestHelper.hpp"
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void ShapeSimpleTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape{ 1, 3, 2, 3 };
+
+    std::vector<int32_t> inputValues{ 1, 1, 1, 1, 1, 1, 1, 1, 1,
+                                      1, 1, 1, 1, 1, 1, 1, 1, 1 };
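+
+    // SHAPE returns the dimensions of its input as a 1-D tensor whose length
+    // equals the input rank, so the { 1, 3, 2, 3 } input yields the four
+    // values below.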
+
+    std::vector<int32_t> expectedOutputShape{ 4 };
+    std::vector<int32_t> expectedOutputValues{ 1, 3, 2, 3 };
+
+    ShapeTest<int32_t, int32_t>(::tflite::TensorType_INT32,
+                                ::tflite::TensorType_INT32,
+                                backends,
+                                inputShape,
+                                inputValues,
+                                expectedOutputValues,
+                                expectedOutputShape);
+}
+
+// SHAPE Test Suite
+TEST_SUITE("SHAPE_CpuRefTests")
+{
+
+TEST_CASE("SHAPE_Simple_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    ShapeSimpleTest(backends);
+}
+
+} // TEST_SUITE("SHAPE_CpuRefTests")
+// End of SHAPE Test Suite
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/ShapeTestHelper.hpp b/delegate/test/ShapeTestHelper.hpp
new file mode 100644
index 0000000..54e27ac
--- /dev/null
+++ b/delegate/test/ShapeTestHelper.hpp
@@ -0,0 +1,173 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateShapeTfLiteModel(tflite::TensorType inputTensorType,
+                                         tflite::TensorType outputTensorType,
+                                         const std::vector<int32_t>& inputTensorShape,
+                                         const std::vector<int32_t>& outputTensorShape,
+                                         float quantScale = 1.0f,
+                                         int quantOffset = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+             CreateQuantizationParameters(flatBufferBuilder,
+                                          0,
+                                          0,
+                                          flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                          flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              inputTensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              outputTensorType,
+                              2,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    const std::vector<int32_t> operatorInputs({ 0 });
+    const std::vector<int32_t> operatorOutputs({ 1 });
+
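+    // ShapeOptions supplies the out_type field, so the operator emits its
+    // result tensor in outputTensorType (INT32 in the accompanying tests).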
+    flatbuffers::Offset<Operator> shapeOperator =
+                                      CreateOperator(flatBufferBuilder,
+                                                     0,
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
+                                                                                             operatorInputs.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+                                                                                             operatorOutputs.size()),
+                                                     BuiltinOptions_ShapeOptions,
+                                                     CreateShapeOptions(flatBufferBuilder, outputTensorType).Union());
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: SHAPE Operator Model");
+
+    flatbuffers::Offset<OperatorCode> operatorCode =
+        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_SHAPE);
+
+    const std::vector<int32_t> subgraphInputs({ 0 });
+    const std::vector<int32_t> subgraphOutputs({ 1 });
+
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(),
+                                                               subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
+                                                               subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&shapeOperator, 1));
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template<typename T, typename K>
+void ShapeTest(tflite::TensorType inputTensorType,
+               tflite::TensorType outputTensorType,
+               std::vector<armnn::BackendId>& backends,
+               std::vector<int32_t>& inputShape,
+               std::vector<T>& inputValues,
+               std::vector<K>& expectedOutputValues,
+               std::vector<int32_t>& expectedOutputShape,
+               float quantScale = 1.0f,
+               int quantOffset = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateShapeTfLiteModel(inputTensorType,
+                                                           outputTensorType,
+                                                           inputShape,
+                                                           expectedOutputShape,
+                                                           quantScale,
+                                                           quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegate;
+
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegate) == kTfLiteOk);
+    CHECK(armnnDelegate != nullptr);
+    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteDelegate;
+
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteDelegate) == kTfLiteOk);
+    CHECK(tfLiteDelegate != nullptr);
+    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify the interpreter to use the Arm NN delegate
+    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
+
+    // Run inference on both interpreters
+    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<K>(tfLiteDelegate,
+                                        armnnDelegate,
+                                        expectedOutputShape,
+                                        expectedOutputValues,
+                                        0);
+
+    tfLiteDelegate.reset(nullptr);
+    armnnDelegate.reset(nullptr);
+}
+
+} // anonymous namespace
diff --git a/delegate/test/SliceTest.cpp b/delegate/test/SliceTest.cpp
new file mode 100644
index 0000000..88a70de
--- /dev/null
+++ b/delegate/test/SliceTest.cpp
@@ -0,0 +1,81 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SliceTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void SliceFixtureSimpleTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 3, 2, 3 };
+    std::vector<int32_t> outputShape { 2, 1, 3 };
+    std::vector<int32_t> beginShape  { 3 };
+    std::vector<int32_t> sizeShape   { 3 };
+
+    std::vector<int32_t> beginData { 1, 0, 0 };
+    std::vector<int32_t> sizeData  { 2, 1, 3 };
+    std::vector<float> inputData  { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
+                                    3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
+                                    5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
+    std::vector<float> outputData { 3.0f, 3.0f, 3.0f,
+                                    5.0f, 5.0f, 5.0f };
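+
+    // SLICE copies size[d] elements starting at begin[d] in each dimension:
+    // begin { 1, 0, 0 } with size { 2, 1, 3 } takes the first row of the
+    // second and third { 2, 3 } blocks, i.e. the 3.0f and 5.0f triplets.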
+
+    SliceTestImpl<float>(
+        backends,
+        inputData,
+        outputData,
+        beginData,
+        sizeData,
+        inputShape,
+        beginShape,
+        sizeShape,
+        outputShape);
+}
+
+TEST_SUITE("Slice_CpuRefTests")
+{
+
+TEST_CASE ("Slice_Simple_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    SliceFixtureSimpleTest(backends);
+}
+
+} // Slice_CpuRefTests TestSuite
+
+
+
+TEST_SUITE("Slice_CpuAccTests")
+{
+
+TEST_CASE ("Slice_Simple_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    SliceFixtureSimpleTest(backends);
+}
+
+} // Slice_CpuAccTests TestSuite
+
+
+
+TEST_SUITE("StridedSlice_GpuAccTests")
+{
+
+TEST_CASE ("Slice_Simple_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    SliceFixtureSimpleTest(backends);
+}
+
+} // Slice_GpuAccTests TestSuite
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/SliceTestHelper.hpp b/delegate/test/SliceTestHelper.hpp
new file mode 100644
index 0000000..c938fad
--- /dev/null
+++ b/delegate/test/SliceTestHelper.hpp
@@ -0,0 +1,183 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+#include <armnn/DescriptorsFwd.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+#include <string>
+
+namespace
+{
+
+std::vector<char> CreateSliceTfLiteModel(tflite::TensorType tensorType,
+                                         const std::vector<int32_t>& inputTensorShape,
+                                         const std::vector<int32_t>& beginTensorData,
+                                         const std::vector<int32_t>& sizeTensorData,
+                                         const std::vector<int32_t>& beginTensorShape,
+                                         const std::vector<int32_t>& sizeTensorShape,
+                                         const std::vector<int32_t>& outputTensorShape)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    flatbuffers::Offset<tflite::Buffer> buffers[5] = {
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder,
+            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(beginTensorData.data()),
+            sizeof(int32_t) * beginTensorData.size())),
+            CreateBuffer(flatBufferBuilder,
+            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(sizeTensorData.data()),
+            sizeof(int32_t) * sizeTensorData.size())),
+            CreateBuffer(flatBufferBuilder)
+    };
+
+    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"));
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(beginTensorShape.data(),
+                                                                      beginTensorShape.size()),
+                              ::tflite::TensorType_INT32,
+                              2,
+                              flatBufferBuilder.CreateString("begin_tensor"));
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(sizeTensorShape.data(),
+                                                                      sizeTensorShape.size()),
+                              ::tflite::TensorType_INT32,
+                              3,
+                              flatBufferBuilder.CreateString("size_tensor"));
+    tensors[3] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              4,
+                              flatBufferBuilder.CreateString("output"));
+
+
+    // create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SliceOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateSliceOptions(flatBufferBuilder).Union();
+
+    const std::vector<int> operatorInputs{ 0, 1, 2 };
+    const std::vector<int> operatorOutputs{ 3 };
+    flatbuffers::Offset <Operator> sliceOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{ 0, 1, 2 };
+    const std::vector<int> subgraphOutputs{ 3 };
+    flatbuffers::Offset <SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&sliceOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Slice Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         BuiltinOperator_SLICE);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers, 5));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void SliceTestImpl(std::vector<armnn::BackendId>& backends,
+                   std::vector<T>& inputValues,
+                   std::vector<T>& expectedOutputValues,
+                   std::vector<int32_t>& beginTensorData,
+                   std::vector<int32_t>& sizeTensorData,
+                   std::vector<int32_t>& inputTensorShape,
+                   std::vector<int32_t>& beginTensorShape,
+                   std::vector<int32_t>& sizeTensorShape,
+                   std::vector<int32_t>& outputTensorShape)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateSliceTfLiteModel(
+        ::tflite::TensorType_FLOAT32,
+        inputTensorShape,
+        beginTensorData,
+        sizeTensorData,
+        beginTensorShape,
+        sizeTensorShape,
+        outputTensorShape);
+
+    auto tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegate;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+        (&armnnDelegate) == kTfLiteOk);
+    CHECK(armnnDelegate != nullptr);
+    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteDelegate;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+        (&tfLiteDelegate) == kTfLiteOk);
+    CHECK(tfLiteDelegate != nullptr);
+    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify the interpreter to use the Arm NN delegate
+    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
+
+    // Run inference on both interpreters
+    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
+                                        armnnDelegate,
+                                        outputTensorShape,
+                                        expectedOutputValues);
+
+    tfLiteDelegate.reset(nullptr);
+    armnnDelegate.reset(nullptr);
+} // End of Slice Test
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/SoftmaxTest.cpp b/delegate/test/SoftmaxTest.cpp
new file mode 100644
index 0000000..27f7809
--- /dev/null
+++ b/delegate/test/SoftmaxTest.cpp
@@ -0,0 +1,77 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SoftmaxTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+TEST_SUITE ("Softmax_GpuAccTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    std::vector<float> expectedOutput = {0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+                                         0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    std::vector<float> expectedOutput = {0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092, 0.352414012,
+                                         0.224709094, 0.193408906, 0.123322964, 0.106145054};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    std::vector<float> expectedOutput =
+        {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+         -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+    SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_GpuAccTests")
+
+TEST_SUITE ("Softmax_CpuRefTests")
+{
+
+TEST_CASE ("Softmax_Standard_Beta_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<float> expectedOutput = {
+        0.00994190481, 0.0445565246, 0.0734612942, 0.329230666, 0.542809606,
+        0.710742831, 0.158588171, 0.0961885825, 0.0214625746, 0.0130177103};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1, expectedOutput);
+}
+
+TEST_CASE ("Softmax_Different_Beta_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<float> expectedOutput = {
+        0.0946234912, 0.148399189, 0.172415257, 0.270400971, 0.314161092,
+        0.352414012, 0.224709094, 0.193408906, 0.123322964, 0.106145054};
+    SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 0.3, expectedOutput);
+}
+
+TEST_CASE ("Log_Softmax_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    std::vector<float> expectedOutput =
+        {-4.61099672, -3.11099672, -2.61099672, -1.11099672, -0.610996664,
+         -0.341444582, -1.84144461, -2.34144449, -3.84144449, -4.34144449};
+    SoftmaxTestCase(tflite::BuiltinOperator_LOG_SOFTMAX, backends, 0, expectedOutput);
+}
+} // TEST_SUITE ("Softmax_CpuRefTests")
+} // namespace armnnDelegate
diff --git a/delegate/test/SoftmaxTestHelper.hpp b/delegate/test/SoftmaxTestHelper.hpp
new file mode 100644
index 0000000..15177b7
--- /dev/null
+++ b/delegate/test/SoftmaxTestHelper.hpp
@@ -0,0 +1,194 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+#include <armnnUtils/FloatingPointComparison.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateSoftmaxTfLiteModel(tflite::BuiltinOperator softmaxOperatorCode,
+                                           tflite::TensorType tensorType,
+                                           const std::vector <int32_t>& tensorShape,
+                                           float beta)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              1);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
+                                                                      tensorShape.size()),
+                              tensorType,
+                              2);
+
+    const std::vector<int32_t> operatorInputs({0});
+    const std::vector<int32_t> operatorOutputs({1});
+
+    flatbuffers::Offset<Operator> softmaxOperator;
+    flatbuffers::Offset<flatbuffers::String> modelDescription;
+    flatbuffers::Offset<OperatorCode> operatorCode;
+
+    switch (softmaxOperatorCode)
+    {
+        case tflite::BuiltinOperator_SOFTMAX:
+            softmaxOperator =
+                CreateOperator(flatBufferBuilder,
+                               0,
+                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                               BuiltinOptions_SoftmaxOptions,
+                               CreateSoftmaxOptions(flatBufferBuilder, beta).Union());
+                modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: Softmax Operator Model");
+                operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                 tflite::BuiltinOperator_SOFTMAX);
+            break;
+        case tflite::BuiltinOperator_LOG_SOFTMAX:
+            softmaxOperator =
+                CreateOperator(flatBufferBuilder,
+                               0,
+                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                               BuiltinOptions_LogSoftmaxOptions,
+                               CreateLogSoftmaxOptions(flatBufferBuilder).Union());
+                flatBufferBuilder.CreateString("ArmnnDelegate: Log-Softmax Operator Model");
+            operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                              tflite::BuiltinOperator_LOG_SOFTMAX);
+            break;
+        default:
+            break;
+    }
+    const std::vector<int32_t> subgraphInputs({0});
+    const std::vector<int32_t> subgraphOutputs({1});
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&softmaxOperator, 1));
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+    flatBufferBuilder.Finish(flatbufferModel);
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void SoftmaxTest(tflite::BuiltinOperator softmaxOperatorCode,
+                 tflite::TensorType tensorType,
+                 std::vector<armnn::BackendId>& backends,
+                 std::vector<int32_t>& shape,
+                 std::vector<float>& inputValues,
+                 std::vector<float>& expectedOutputValues,
+                 float beta = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateSoftmaxTfLiteModel(softmaxOperatorCode,
+                                                             tensorType,
+                                                             shape,
+                                                             beta);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteInterpreterInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteInterpreterInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+
+    for (size_t i = 0; i < inputValues.size(); ++i)
+    {
+        CHECK(armnnUtils::within_percentage_tolerance(expectedOutputValues[i], armnnDelegateOutputData[i], 0.1));
+        CHECK(armnnUtils::within_percentage_tolerance(tfLiteInterpreterOutputData[i],
+                                                      armnnDelegateOutputData[i], 0.1));
+    }
+}
+
+
+/// Convenience function to run softmax and log-softmax test cases
+/// \param operatorCode tflite::BuiltinOperator_SOFTMAX or tflite::BuiltinOperator_LOG_SOFTMAX
+/// \param backends armnn backends to target
+/// \param beta multiplicative parameter to the softmax function
+/// \param expectedOutput to be checked against transformed input
+void SoftmaxTestCase(tflite::BuiltinOperator operatorCode,
+                     std::vector<armnn::BackendId> backends,
+                     float beta,
+                     std::vector<float> expectedOutput)
+{
+    std::vector<float> input = {
+        1.0, 2.5, 3.0, 4.5, 5.0,
+        -1.0, -2.5, -3.0, -4.5, -5.0};
+    std::vector<int32_t> shape = {2, 5};
+
+    SoftmaxTest(operatorCode,
+                tflite::TensorType_FLOAT32,
+                backends,
+                shape,
+                input,
+                expectedOutput,
+                beta);
+}
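+
+// A minimal usage sketch (illustrative only; the test case name is
+// hypothetical and the expected values are placeholders, not verified
+// softmax outputs):
+//
+//     TEST_CASE ("Softmax_Standard_CpuRef_Test")
+//     {
+//         std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+//         SoftmaxTestCase(tflite::BuiltinOperator_SOFTMAX, backends, 1.0f,
+//                         { /* expected outputs for the fixed input, beta = 1 */ });
+//     }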
+
+} // anonymous namespace
diff --git a/delegate/test/SpaceDepthTest.cpp b/delegate/test/SpaceDepthTest.cpp
new file mode 100644
index 0000000..8a8bbae
--- /dev/null
+++ b/delegate/test/SpaceDepthTest.cpp
@@ -0,0 +1,207 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SpaceDepthTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void DepthToSpaceFp32Test(std::vector<armnn::BackendId>& backends, int blockSize)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 2, 2, 4 };
+    std::vector<int32_t> outputShape { 1, 4, 4, 1 };
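+    // With blockSize 2, DEPTH_TO_SPACE moves each group of blockSize^2 = 4
+    // channels into a 2x2 spatial patch: [1, 2, 2, 4] -> [1, 4, 4, 1].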
+
+    std::vector<float> inputValues = { 1.f,  2.f,  3.f,  4.f,
+                                       5.f,  6.f,  7.f,  8.f,
+                                       9.f, 10.f, 11.f, 12.f,
+                                       13.f, 14.f, 15.f, 16.f };
+
+    std::vector<float> expectedOutputValues = { 1.f,   2.f,   5.f,   6.f,
+                                                3.f,   4.f,   7.f,   8.f,
+                                                9.f,  10.f,  13.f,  14.f,
+                                                11.f,  12.f,  15.f,  16.f };
+
+    SpaceDepthTest<float>(tflite::BuiltinOperator_DEPTH_TO_SPACE,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          blockSize);
+}
+
+void DepthToSpaceUint8Test(std::vector<armnn::BackendId>& backends, int blockSize)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 2, 1, 1, 4 };
+    std::vector<int32_t> outputShape { 2, 2, 2, 1 };
+
+    std::vector<uint8_t> inputValues = { 1,  2,  3,  4,
+                                         5,  6,  7,  8 };
+
+    std::vector<uint8_t> expectedOutputValues = { 1,  2,  3,  4,
+                                                  5,  6,  7,  8 };
+
+    SpaceDepthTest<uint8_t>(tflite::BuiltinOperator_DEPTH_TO_SPACE,
+                            ::tflite::TensorType_UINT8,
+                            backends,
+                            inputShape,
+                            outputShape,
+                            inputValues,
+                            expectedOutputValues,
+                            blockSize);
+}
+
+void SpaceToDepthFp32Test(std::vector<armnn::BackendId>& backends, int blockSize)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 2, 2, 2 };
+    std::vector<int32_t> outputShape { 1, 1, 1, 8 };
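+    // With blockSize 2, SPACE_TO_DEPTH folds each 2x2 spatial patch into the
+    // channel axis: [1, 2, 2, 2] -> [1, 1, 1, 8].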
+
+    std::vector<float> inputValues = { 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f };
+    std::vector<float> expectedOutputValues = { 1.4f, 2.3f, 3.2f, 4.1f, 5.4f, 6.3f, 7.2f, 8.1f };
+
+    SpaceDepthTest<float>(tflite::BuiltinOperator_SPACE_TO_DEPTH,
+                          ::tflite::TensorType_FLOAT32,
+                          backends,
+                          inputShape,
+                          outputShape,
+                          inputValues,
+                          expectedOutputValues,
+                          blockSize);
+}
+
+void SpaceToDepthUint8Test(std::vector<armnn::BackendId>& backends, int blockSize)
+{
+    // Set input data
+    std::vector<int32_t> inputShape { 1, 2, 2, 1 };
+    std::vector<int32_t> outputShape { 1, 1, 1, 4 };
+
+    std::vector<uint8_t> inputValues = { 1, 2, 3, 2 };
+    std::vector<uint8_t> expectedOutputValues = { 1, 2, 3, 2 };
+
+    SpaceDepthTest<uint8_t>(tflite::BuiltinOperator_SPACE_TO_DEPTH,
+                            ::tflite::TensorType_UINT8,
+                            backends,
+                            inputShape,
+                            outputShape,
+                            inputValues,
+                            expectedOutputValues,
+                            blockSize);
+}
+
+TEST_SUITE("DepthToSpace_CpuRefTests")
+{
+
+TEST_CASE ("DepthToSpaceFp32Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DepthToSpaceFp32Test(backends, 2);
+}
+
+TEST_CASE ("DepthToSpaceUint8Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    DepthToSpaceUint8Test(backends, 2);
+}
+
+} // TEST_SUITE("DepthToSpace_CpuRefTests")
+
+
+TEST_SUITE("DepthToSpace_CpuAccTests")
+{
+
+TEST_CASE ("DepthToSpaceFp32Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    DepthToSpaceFp32Test(backends, 2);
+}
+
+TEST_CASE ("DepthToSpaceUint8Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    DepthToSpaceUint8Test(backends, 2);
+}
+
+} // TEST_SUITE("DepthToSpace_CpuAccTests")
+
+TEST_SUITE("DepthToSpace_GpuAccTests")
+{
+
+TEST_CASE ("DepthToSpaceFp32Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    DepthToSpaceFp32Test(backends, 2);
+}
+
+TEST_CASE ("DepthToSpaceUint8Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    DepthToSpaceUint8Test(backends, 2);
+}
+
+} // TEST_SUITE("DepthToSpace_GpuAccTests")
+
+TEST_SUITE("SpaceToDepth_CpuRefTests")
+{
+
+TEST_CASE ("SpaceToDepthFp32Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    SpaceToDepthFp32Test(backends, 2);
+}
+
+TEST_CASE ("SpaceToDepthUint8Test_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    SpaceToDepthUint8Test(backends, 2);
+}
+
+} // TEST_SUITE("SpaceToDepth_CpuRefTests")
+
+TEST_SUITE("SpaceToDepth_CpuAccTests")
+{
+
+TEST_CASE ("SpaceToDepthFp32Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    SpaceToDepthFp32Test(backends, 2);
+}
+
+TEST_CASE ("SpaceToDepthUint8Test_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    SpaceToDepthUint8Test(backends, 2);
+}
+
+} // TEST_SUITE("SpaceToDepth_CpuAccTests")
+
+TEST_SUITE("SpaceToDepth_GpuAccTests")
+{
+
+TEST_CASE ("SpaceToDepthFp32Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    SpaceToDepthFp32Test(backends, 2);
+}
+
+TEST_CASE ("SpaceToDepthUint8Test_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
+    SpaceToDepthUint8Test(backends, 2);
+}
+
+} // TEST_SUITE("SpaceToDepth_GpuAccTests")
+
+} // namespace armnnDelegate
diff --git a/delegate/test/SpaceDepthTestHelper.hpp b/delegate/test/SpaceDepthTestHelper.hpp
new file mode 100644
index 0000000..6e8e39d
--- /dev/null
+++ b/delegate/test/SpaceDepthTestHelper.hpp
@@ -0,0 +1,168 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateSpaceDepthTfLiteModel(tflite::BuiltinOperator spaceDepthOperatorCode,
+                                              tflite::TensorType tensorType,
+                                              const std::vector <int32_t>& inputTensorShape,
+                                              const std::vector <int32_t>& outputTensorShape,
+                                              int32_t blockSize)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
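+    // Identity quantization (scale 1.0, zero point 0): quantized values map to
+    // themselves, so the uint8 tests can compare raw integer values directly.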
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({  1.0f }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ 0 }));
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    const std::vector<int32_t> operatorInputs({0});
+    const std::vector<int32_t> operatorOutputs({1});
+
+    flatbuffers::Offset<Operator> spaceDepthOperator;
+    flatbuffers::Offset<flatbuffers::String> modelDescription;
+    flatbuffers::Offset<OperatorCode> operatorCode;
+
+    switch (spaceDepthOperatorCode)
+    {
+        case tflite::BuiltinOperator_SPACE_TO_DEPTH:
+            spaceDepthOperator =
+                CreateOperator(flatBufferBuilder,
+                               0,
+                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                               BuiltinOptions_SpaceToDepthOptions,
+                               CreateSpaceToDepthOptions(flatBufferBuilder, blockSize).Union());
+            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: SPACE_TO_DEPTH Operator Model");
+            operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                              tflite::BuiltinOperator_SPACE_TO_DEPTH);
+            break;
+        case tflite::BuiltinOperator_DEPTH_TO_SPACE:
+            spaceDepthOperator =
+                CreateOperator(flatBufferBuilder,
+                               0,
+                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                               BuiltinOptions_DepthToSpaceOptions,
+                               CreateDepthToSpaceOptions(flatBufferBuilder, blockSize).Union());
+            modelDescription = flatBufferBuilder.CreateString("ArmnnDelegate: DEPTH_TO_SPACE Operator Model");
+            operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                              tflite::BuiltinOperator_DEPTH_TO_SPACE);
+            break;
+        default:
+            break;
+    }
+    const std::vector<int32_t> subgraphInputs({0});
+    const std::vector<int32_t> subgraphOutputs({1});
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&spaceDepthOperator, 1));
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+    flatBufferBuilder.Finish(flatbufferModel);
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void SpaceDepthTest(tflite::BuiltinOperator spaceDepthOperatorCode,
+                    tflite::TensorType tensorType,
+                    std::vector<armnn::BackendId>& backends,
+                    std::vector<int32_t>& inputShape,
+                    std::vector<int32_t>& outputShape,
+                    std::vector<T>& inputValues,
+                    std::vector<T>& expectedOutputValues,
+                    int32_t blockSize = 2)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateSpaceDepthTfLiteModel(spaceDepthOperatorCode,
+                                                                tensorType,
+                                                                inputShape,
+                                                                outputShape,
+                                                                blockSize);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                         armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+}
+
+} // anonymous namespace
diff --git a/delegate/test/SplitTest.cpp b/delegate/test/SplitTest.cpp
new file mode 100644
index 0000000..b54ce21
--- /dev/null
+++ b/delegate/test/SplitTest.cpp
@@ -0,0 +1,262 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "SplitTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+// SPLIT Operator
+void SplitUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> axisShape { 1 };
+    std::vector<int32_t> inputShape { 2, 2, 2, 2 };
+    std::vector<int32_t> outputShape0 { 2, 2, 2, 1 };
+    std::vector<int32_t> outputShape1 { 2, 2, 2, 1 };
+    std::vector<std::vector<int32_t>> outputShapes{ outputShape0, outputShape1 };
+
+    std::vector<int32_t> axisData { 3 };  // Axis
+    std::vector<uint8_t> inputValues { 1, 2, 3, 4, 5, 6, 7, 8,
+                                       9, 10, 11, 12, 13, 14, 15, 16 }; // Input
+
+
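+    // Splitting the innermost axis (size 2) into two size-1 slices selects
+    // alternating elements of the flattened input.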
+    std::vector<uint8_t> expectedOutputValues0 { 1, 3, 5, 7, 9, 11, 13, 15 };
+    std::vector<uint8_t> expectedOutputValues1 { 2, 4, 6, 8, 10, 12, 14, 16 };
+    std::vector<std::vector<uint8_t>> expectedOutputValues{ expectedOutputValues0, expectedOutputValues1 };
+
+    int32_t numSplits = 2;
+
+    SplitTest<uint8_t>(::tflite::TensorType_UINT8,
+                       backends,
+                       axisShape,
+                       inputShape,
+                       outputShapes,
+                       axisData,
+                       inputValues,
+                       expectedOutputValues,
+                       numSplits);
+}
+
+void SplitFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> axisShape { 1 };
+    std::vector<int32_t> inputShape { 2, 2, 2, 2 };
+    std::vector<int32_t> outputShape0 { 2, 1, 2, 2 };
+    std::vector<int32_t> outputShape1 { 2, 1, 2, 2 };
+    std::vector<std::vector<int32_t>> outputShapes{ outputShape0, outputShape1 };
+
+    std::vector<int32_t> axisData { 1 };  // Axis
+    std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
+                                     9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f  }; // Input
+
+
+    std::vector<float> expectedOutputValues0 { 1.0f, 2.0f, 3.0f, 4.0f, 9.0f, 10.0f, 11.0f, 12.0f };
+    std::vector<float> expectedOutputValues1 { 5.0f, 6.0f, 7.0f, 8.0f, 13.0f, 14.0f, 15.0f, 16.0f };
+    std::vector<std::vector<float>> expectedOutputValues{ expectedOutputValues0, expectedOutputValues1 };
+
+    int32_t numSplits = 2;
+
+    SplitTest<float>(::tflite::TensorType_FLOAT32,
+                     backends,
+                     axisShape,
+                     inputShape,
+                     outputShapes,
+                     axisData,
+                     inputValues,
+                     expectedOutputValues,
+                     numSplits);
+}
+
+// SPLIT Test Suite
+TEST_SUITE("SPLIT_CpuRefTests")
+{
+
+TEST_CASE ("SPLIT_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    SplitUint8Test(backends);
+}
+
+TEST_CASE ("SPLIT_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    SplitFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("SPLIT_CpuAccTests")
+{
+
+TEST_CASE ("SPLIT_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    SplitUint8Test(backends);
+}
+
+TEST_CASE ("SPLIT_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    SplitFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("SPLIT_GpuAccTests")
+{
+
+TEST_CASE ("SPLIT_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    SplitUint8Test(backends);
+}
+
+TEST_CASE ("SPLIT_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    SplitFp32Test(backends);
+}
+
+}
+// End of SPLIT Test Suite
+
+// SPLIT_V Operator
+void SplitVUint8Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> axisShape { 1 };
+    std::vector<int32_t> inputShape { 2, 4, 2, 2 };
+    std::vector<int32_t> splitsShape { 2 };
+    std::vector<int32_t> outputShape0 { 2, 3, 2, 2 };
+    std::vector<int32_t> outputShape1 { 2, 1, 2, 2 };
+    std::vector<std::vector<int32_t>> outputShapes{ outputShape0, outputShape1 };
+
+    std::vector<int32_t> axisData { 1 };    // Axis
+    std::vector<int32_t> splitsData { 3, 1 };  // Splits
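+    // SPLIT_V allows uneven splits: axis 1 (size 4) is divided into slices of
+    // size 3 and 1, giving outputs [2, 3, 2, 2] and [2, 1, 2, 2].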
+    std::vector<uint8_t> inputValues { 1, 2, 3, 4, 5, 6, 7, 8,
+                                       9, 10, 11, 12, 13, 14, 15, 16,
+                                       17, 18, 19, 20, 21, 22, 23, 24,
+                                       25, 26, 27, 28, 29, 30, 31, 32 }; // Input
+
+
+    std::vector<uint8_t> expectedOutputValues0 { 1, 2, 3, 4, 5, 6, 7, 8,
+                                                 9, 10, 11, 12, 17, 18, 19, 20,
+                                                 21, 22, 23, 24, 25, 26, 27, 28 };
+    std::vector<uint8_t> expectedOutputValues1 { 13, 14, 15, 16, 29, 30, 31, 32 };
+    std::vector<std::vector<uint8_t>> expectedOutputValues{ expectedOutputValues0, expectedOutputValues1 };
+
+    int32_t numSplits = 2;
+
+    SplitVTest<uint8_t>(::tflite::TensorType_UINT8,
+                        backends,
+                        inputShape,
+                        splitsShape,
+                        axisShape,
+                        outputShapes,
+                        inputValues,
+                        splitsData,
+                        axisData,
+                        expectedOutputValues,
+                        numSplits);
+}
+
+void SplitVFp32Test(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> axisShape { 1 };
+    std::vector<int32_t> inputShape { 2, 4, 2, 2 };
+    std::vector<int32_t> splitsShape { 2 };
+    std::vector<int32_t> outputShape0 { 2, 3, 2, 2 };
+    std::vector<int32_t> outputShape1 { 2, 1, 2, 2 };
+    std::vector<std::vector<int32_t>> outputShapes{ outputShape0, outputShape1 };
+
+    std::vector<int32_t> axisData { 1 };    // Axis
+    std::vector<int32_t> splitsData { 3, 1 };  // Splits
+    std::vector<float> inputValues { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
+                                     9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f,
+                                     17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f,
+                                     25.0f, 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f   }; // Input
+
+
+    std::vector<float> expectedOutputValues0 { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
+                                               9.0f, 10.0f, 11.0f, 12.0f, 17.0f, 18.0f, 19.0f, 20.0f,
+                                               21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f, 27.0f, 28.0f };
+    std::vector<float> expectedOutputValues1 { 13.0f, 14.0f, 15.0f, 16.0f, 29.0f, 30.0f, 31.0f, 32.0f };
+    std::vector<std::vector<float>> expectedOutputValues{ expectedOutputValues0, expectedOutputValues1 };
+
+    int32_t numSplits = 2;
+
+    SplitVTest<float>(::tflite::TensorType_FLOAT32,
+                      backends,
+                      inputShape,
+                      splitsShape,
+                      axisShape,
+                      outputShapes,
+                      inputValues,
+                      splitsData,
+                      axisData,
+                      expectedOutputValues,
+                      numSplits);
+}
+
+// SPLIT_V Test Suite
+TEST_SUITE("SPLIT_V_CpuRefTests")
+{
+
+TEST_CASE ("SPLIT_V_Uint8_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    SplitVUint8Test(backends);
+}
+
+TEST_CASE ("SPLIT_V_Fp32_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    SplitVFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("SPLIT_V_CpuAccTests")
+{
+
+TEST_CASE ("SPLIT_V_Uint8_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    SplitVUint8Test(backends);
+}
+
+TEST_CASE ("SPLIT_V_Fp32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    SplitVFp32Test(backends);
+}
+
+}
+
+TEST_SUITE("SPLIT_V_GpuAccTests")
+{
+
+TEST_CASE ("SPLIT_V_Uint8_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    SplitVUint8Test(backends);
+}
+
+TEST_CASE ("SPLIT_V_Fp32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    SplitVFp32Test(backends);
+}
+
+}
+// End of SPLIT_V Test Suite
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/SplitTestHelper.hpp b/delegate/test/SplitTestHelper.hpp
new file mode 100644
index 0000000..503fbc8
--- /dev/null
+++ b/delegate/test/SplitTestHelper.hpp
@@ -0,0 +1,370 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+#include <string>
+
+namespace
+{
+
+std::vector<char> CreateSplitTfLiteModel(tflite::TensorType tensorType,
+                                         std::vector<int32_t>& axisTensorShape,
+                                         std::vector<int32_t>& inputTensorShape,
+                                         const std::vector<std::vector<int32_t>>& outputTensorShapes,
+                                         std::vector<int32_t>& axisData,
+                                         const int32_t numSplits,
+                                         float quantScale = 1.0f,
+                                         int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
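+    // Buffer 0 is the empty sentinel required by the TfLite schema; buffer 1
+    // backs the runtime input tensor and buffer 2 holds the constant axis data.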
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
+                                                                  sizeof(int32_t) * axisData.size())));
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 4> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
+                                                                      axisTensorShape.size()),
+                              ::tflite::TensorType_INT32,
+                              2,
+                              flatBufferBuilder.CreateString("axis"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+
+    // Create the output tensors
+    for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
+    {
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        tensors[i + 2] = CreateTensor(flatBufferBuilder,
+                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
+                                                                              outputTensorShapes[i].size()),
+                                      tensorType,
+                                      (i+3),
+                                      flatBufferBuilder.CreateString("output"),
+                                      quantizationParameters);
+    }
+
+    // Create the operator. SPLIT uses SplitOptions.
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SplitOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitOptions(flatBufferBuilder, numSplits).Union();
+
+    const std::vector<int> operatorInputs{ {0, 1} };
+    const std::vector<int> operatorOutputs{ {2, 3} };
+    flatbuffers::Offset <Operator> controlOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{ {0, 1} };
+    const std::vector<int> subgraphOutputs{ {2, 3} };
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&controlOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void SplitTest(tflite::TensorType tensorType,
+               std::vector<armnn::BackendId>& backends,
+               std::vector<int32_t>& axisTensorShape,
+               std::vector<int32_t>& inputTensorShape,
+               std::vector<std::vector<int32_t>>& outputTensorShapes,
+               std::vector<int32_t>& axisData,
+               std::vector<T>& inputValues,
+               std::vector<std::vector<T>>& expectedOutputValues,
+               const int32_t numSplits,
+               float quantScale = 1.0f,
+               int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateSplitTfLiteModel(tensorType,
+                                                           axisTensorShape,
+                                                           inputTensorShape,
+                                                           outputTensorShapes,
+                                                           axisData,
+                                                           numSplits,
+                                                           quantScale,
+                                                           quantOffset);
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegate;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegate) == kTfLiteOk);
+    CHECK(armnnDelegate != nullptr);
+    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteDelegate;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteDelegate) == kTfLiteOk);
+    CHECK(tfLiteDelegate != nullptr);
+    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify the ArmNN interpreter to use the ArmNN delegate
+    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data (input 0 is the constant axis tensor; input 1 is the data tensor)
+    armnnDelegate::FillInput<T>(tfLiteDelegate, 1, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegate, 1, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
+    {
+        armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
+                                            armnnDelegate,
+                                            outputTensorShapes[i],
+                                            expectedOutputValues[i],
+                                            i);
+    }
+
+    tfLiteDelegate.reset(nullptr);
+    armnnDelegate.reset(nullptr);
+} // End of SPLIT Test
+
+std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
+                                          std::vector<int32_t>& inputTensorShape,
+                                          std::vector<int32_t>& splitsTensorShape,
+                                          std::vector<int32_t>& axisTensorShape,
+                                          const std::vector<std::vector<int32_t>>& outputTensorShapes,
+                                          std::vector<int32_t>& splitsData,
+                                          std::vector<int32_t>& axisData,
+                                          const int32_t numSplits,
+                                          float quantScale = 1.0f,
+                                          int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+    buffers[1] = CreateBuffer(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(splitsData.data()),
+                                                             sizeof(int32_t) * splitsData.size()));
+    buffers[2] = CreateBuffer(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
+                                                             sizeof(int32_t) * axisData.size()));
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              0,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(splitsTensorShape.data(),
+                                                                      splitsTensorShape.size()),
+                              ::tflite::TensorType_INT32,
+                              1,
+                              flatBufferBuilder.CreateString("splits"),
+                              quantizationParameters);
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
+                                                                      axisTensorShape.size()),
+                              ::tflite::TensorType_INT32,
+                              2,
+                              flatBufferBuilder.CreateString("axis"),
+                              quantizationParameters);
+
+    // Create the output tensors
+    for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
+    {
+        tensors[i + 3] = CreateTensor(flatBufferBuilder,
+                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
+                                                                              outputTensorShapes[i].size()),
+                                      tensorType,
+                                      0,
+                                      flatBufferBuilder.CreateString("output"),
+                                      quantizationParameters);
+    }
+
+    // Create the operator. SPLIT_V uses SplitVOptions.
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_SplitVOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateSplitVOptions(flatBufferBuilder, numSplits).Union();
+
+    const std::vector<int> operatorInputs{ {0, 1, 2} };
+    const std::vector<int> operatorOutputs{ {3, 4} };
+    flatbuffers::Offset <Operator> controlOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{ {0, 1, 2} };
+    const std::vector<int> subgraphOutputs{ {3, 4} };
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&controlOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT_V Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT_V);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void SplitVTest(tflite::TensorType tensorType,
+                std::vector<armnn::BackendId>& backends,
+                std::vector<int32_t>& inputTensorShape,
+                std::vector<int32_t>& splitsTensorShape,
+                std::vector<int32_t>& axisTensorShape,
+                std::vector<std::vector<int32_t>>& outputTensorShapes,
+                std::vector<T>& inputValues,
+                std::vector<int32_t>& splitsData,
+                std::vector<int32_t>& axisData,
+                std::vector<std::vector<T>>& expectedOutputValues,
+                const int32_t numSplits,
+                float quantScale = 1.0f,
+                int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateSplitVTfLiteModel(tensorType,
+                                                            inputTensorShape,
+                                                            splitsTensorShape,
+                                                            axisTensorShape,
+                                                            outputTensorShapes,
+                                                            splitsData,
+                                                            axisData,
+                                                            numSplits,
+                                                            quantScale,
+                                                            quantOffset);
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegate;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegate) == kTfLiteOk);
+    CHECK(armnnDelegate != nullptr);
+    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteDelegate;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteDelegate) == kTfLiteOk);
+    CHECK(tfLiteDelegate != nullptr);
+    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify the ArmNN interpreter to use the ArmNN delegate
+    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
+
+    // Run EnqueueWorkload
+    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
+    {
+        armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
+                                            armnnDelegate,
+                                            outputTensorShapes[i],
+                                            expectedOutputValues[i],
+                                            i);
+    }
+
+    tfLiteDelegate.reset(nullptr);
+    armnnDelegate.reset(nullptr);
+} // End of SPLIT_V Test
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/StridedSliceTest.cpp b/delegate/test/StridedSliceTest.cpp
new file mode 100644
index 0000000..5b6d7ef
--- /dev/null
+++ b/delegate/test/StridedSliceTest.cpp
@@ -0,0 +1,241 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "StridedSliceTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void StridedSlice4DTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 3, 2, 3, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+    std::vector<int32_t> beginShape  { 4 };
+    std::vector<int32_t> endShape    { 4 };
+    std::vector<int32_t> strideShape { 4 };
+
+    std::vector<int32_t> beginData  { 1, 0, 0, 0 };
+    std::vector<int32_t> endData    { 2, 2, 3, 1 };
+    std::vector<int32_t> strideData { 1, 1, 1, 1 };
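+    // Equivalent to input[1:2, 0:2, 0:3, 0:1]: selects only the middle batch.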
+    std::vector<float> inputData  { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
+                                    3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
+                                    5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
+    std::vector<float> outputData { 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f };
+
+    StridedSliceTestImpl<float>(
+            backends,
+            inputData,
+            outputData,
+            beginData,
+            endData,
+            strideData,
+            inputShape,
+            beginShape,
+            endShape,
+            strideShape,
+            outputShape
+            );
+}
+
+void StridedSlice4DReverseTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 3, 2, 3, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 3, 1 };
+    std::vector<int32_t> beginShape  { 4 };
+    std::vector<int32_t> endShape    { 4 };
+    std::vector<int32_t> strideShape { 4 };
+
+    std::vector<int32_t> beginData  { 1, -1, 0, 0 };
+    std::vector<int32_t> endData    { 2, -3, 3, 1 };
+    std::vector<int32_t> strideData { 1, -1, 1, 1 };
+    std::vector<float>   inputData  { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
+                                      3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
+                                      5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
+    std::vector<float>   outputData { 4.0f, 4.0f, 4.0f, 3.0f, 3.0f, 3.0f };
+
+    StridedSliceTestImpl<float>(
+            backends,
+            inputData,
+            outputData,
+            beginData,
+            endData,
+            strideData,
+            inputShape,
+            beginShape,
+            endShape,
+            strideShape,
+            outputShape
+    );
+}
+
+void StridedSliceSimpleStrideTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 3, 2, 3, 1 };
+    std::vector<int32_t> outputShape { 2, 1, 2, 1 };
+    std::vector<int32_t> beginShape  { 4 };
+    std::vector<int32_t> endShape    { 4 };
+    std::vector<int32_t> strideShape { 4 };
+
+    std::vector<int32_t> beginData  { 0, 0, 0, 0 };
+    std::vector<int32_t> endData    { 3, 2, 3, 1 };
+    std::vector<int32_t> strideData { 2, 2, 2, 1 };
+    std::vector<float>   inputData  { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
+                                      3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
+                                      5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
+    std::vector<float>   outputData { 1.0f, 1.0f,
+                                      5.0f, 5.0f };
+
+    StridedSliceTestImpl<float>(
+            backends,
+            inputData,
+            outputData,
+            beginData,
+            endData,
+            strideData,
+            inputShape,
+            beginShape,
+            endShape,
+            strideShape,
+            outputShape
+    );
+}
+
+void StridedSliceSimpleRangeMaskTest(std::vector<armnn::BackendId>& backends)
+{
+    std::vector<int32_t> inputShape  { 3, 2, 3, 1 };
+    std::vector<int32_t> outputShape { 3, 2, 3, 1 };
+    std::vector<int32_t> beginShape  { 4 };
+    std::vector<int32_t> endShape    { 4 };
+    std::vector<int32_t> strideShape { 4 };
+
+    std::vector<int32_t> beginData  { 1, 1, 1, 1 };
+    std::vector<int32_t> endData    { 1, 1, 1, 1 };
+    std::vector<int32_t> strideData { 1, 1, 1, 1 };
+
+    int beginMask = -1;
+    int endMask   = -1;
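+    // A mask of -1 has every bit set, so begin/end are ignored on all four
+    // axes and the slice spans the full input range.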
+
+    std::vector<float>   inputData  { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
+                                      3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
+                                      5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
+    std::vector<float>   outputData { 1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
+                                      3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f,
+                                      5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f };
+
+    StridedSliceTestImpl<float>(
+            backends,
+            inputData,
+            outputData,
+            beginData,
+            endData,
+            strideData,
+            inputShape,
+            beginShape,
+            endShape,
+            strideShape,
+            outputShape,
+            beginMask,
+            endMask
+    );
+}
+
+TEST_SUITE("StridedSlice_CpuRefTests")
+{
+
+TEST_CASE ("StridedSlice_4D_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    StridedSlice4DTest(backends);
+}
+
+TEST_CASE ("StridedSlice_4D_Reverse_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    StridedSlice4DReverseTest(backends);
+}
+
+TEST_CASE ("StridedSlice_SimpleStride_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    StridedSliceSimpleStrideTest(backends);
+}
+
+TEST_CASE ("StridedSlice_SimpleRange_CpuRef_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    StridedSliceSimpleRangeMaskTest(backends);
+}
+
+} // StridedSlice_CpuRefTests TestSuite
+
+
+
+TEST_SUITE("StridedSlice_CpuAccTests")
+{
+
+TEST_CASE ("StridedSlice_4D_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    StridedSlice4DTest(backends);
+}
+
+TEST_CASE ("StridedSlice_4D_Reverse_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    StridedSlice4DReverseTest(backends);
+}
+
+TEST_CASE ("StridedSlice_SimpleStride_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    StridedSliceSimpleStrideTest(backends);
+}
+
+TEST_CASE ("StridedSlice_SimpleRange_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    StridedSliceSimpleRangeMaskTest(backends);
+}
+
+} // StridedSlice_CpuAccTests TestSuite
+
+
+
+TEST_SUITE("StridedSlice_GpuAccTests")
+{
+
+TEST_CASE ("StridedSlice_4D_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    StridedSlice4DTest(backends);
+}
+
+TEST_CASE ("StridedSlice_4D_Reverse_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    StridedSlice4DReverseTest(backends);
+}
+
+TEST_CASE ("StridedSlice_SimpleStride_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    StridedSliceSimpleStrideTest(backends);
+}
+
+TEST_CASE ("StridedSlice_SimpleRange_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    StridedSliceSimpleRangeMaskTest(backends);
+}
+
+} // StridedSlice_GpuAccTests TestSuite
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/StridedSliceTestHelper.hpp b/delegate/test/StridedSliceTestHelper.hpp
new file mode 100644
index 0000000..fde7e16
--- /dev/null
+++ b/delegate/test/StridedSliceTestHelper.hpp
@@ -0,0 +1,221 @@
+//
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+#include <armnn/DescriptorsFwd.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+#include <string>
+
+namespace
+{
+
+std::vector<char> CreateStridedSliceTfLiteModel(tflite::TensorType tensorType,
+                                                const std::vector<int32_t>& inputTensorShape,
+                                                const std::vector<int32_t>& beginTensorData,
+                                                const std::vector<int32_t>& endTensorData,
+                                                const std::vector<int32_t>& strideTensorData,
+                                                const std::vector<int32_t>& beginTensorShape,
+                                                const std::vector<int32_t>& endTensorShape,
+                                                const std::vector<int32_t>& strideTensorShape,
+                                                const std::vector<int32_t>& outputTensorShape,
+                                                const int32_t beginMask,
+                                                const int32_t endMask,
+                                                const int32_t ellipsisMask,
+                                                const int32_t newAxisMask,
+                                                const int32_t shrinkAxisMask,
+                                                const armnn::DataLayout& dataLayout)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
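+    // Buffer 0 is the empty sentinel buffer required by the TFLite schema. The runtime
+    // input (buffer 1) and output (buffer 5) are left empty, while buffers 2-4 hold the
+    // constant begin/end/stride data referenced by the tensors below.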
+    flatbuffers::Offset<tflite::Buffer> buffers[6] = {
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(beginTensorData.data()),
+                                                        sizeof(int32_t) * beginTensorData.size())),
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(endTensorData.data()),
+                                                        sizeof(int32_t) * endTensorData.size())),
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(strideTensorData.data()),
+                                                        sizeof(int32_t) * strideTensorData.size())),
+            CreateBuffer(flatBufferBuilder)
+    };
+
+    std::array<flatbuffers::Offset<Tensor>, 5> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"));
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(beginTensorShape.data(),
+                                                                      beginTensorShape.size()),
+                              ::tflite::TensorType_INT32,
+                              2,
+                              flatBufferBuilder.CreateString("begin_tensor"));
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(endTensorShape.data(),
+                                                                      endTensorShape.size()),
+                              ::tflite::TensorType_INT32,
+                              3,
+                              flatBufferBuilder.CreateString("end_tensor"));
+    tensors[3] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(strideTensorShape.data(),
+                                                                      strideTensorShape.size()),
+                              ::tflite::TensorType_INT32,
+                              4,
+                              flatBufferBuilder.CreateString("stride_tensor"));
+    tensors[4] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              5,
+                              flatBufferBuilder.CreateString("output"));
+
+    // Create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_StridedSliceOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateStridedSliceOptions(flatBufferBuilder,
+                                                                                 beginMask,
+                                                                                 endMask,
+                                                                                 ellipsisMask,
+                                                                                 newAxisMask,
+                                                                                 shrinkAxisMask).Union();
+
+    const std::vector<int> operatorInputs{ 0, 1, 2, 3 };
+    const std::vector<int> operatorOutputs{ 4 };
+    flatbuffers::Offset <Operator> sliceOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{ 0, 1, 2, 3 };
+    const std::vector<int> subgraphOutputs{ 4 };
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&sliceOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: StridedSlice Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         BuiltinOperator_STRIDED_SLICE);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers, 6));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
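+/// Builds a StridedSlice model from the given slice parameters, runs it through an
+/// interpreter using the ArmNN delegate and an interpreter running the reference TfLite
+/// kernels, and checks both against the expected output values.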
+template <typename T>
+void StridedSliceTestImpl(std::vector<armnn::BackendId>& backends,
+                          std::vector<T>& inputValues,
+                          std::vector<T>& expectedOutputValues,
+                          std::vector<int32_t>& beginTensorData,
+                          std::vector<int32_t>& endTensorData,
+                          std::vector<int32_t>& strideTensorData,
+                          std::vector<int32_t>& inputTensorShape,
+                          std::vector<int32_t>& beginTensorShape,
+                          std::vector<int32_t>& endTensorShape,
+                          std::vector<int32_t>& strideTensorShape,
+                          std::vector<int32_t>& outputTensorShape,
+                          const int32_t beginMask = 0,
+                          const int32_t endMask = 0,
+                          const int32_t ellipsisMask = 0,
+                          const int32_t newAxisMask = 0,
+                          const int32_t shrinkAxisMask = 0,
+                          const armnn::DataLayout& dataLayout = armnn::DataLayout::NHWC)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateStridedSliceTfLiteModel(
+            ::tflite::TensorType_FLOAT32,
+            inputTensorShape,
+            beginTensorData,
+            endTensorData,
+            strideTensorData,
+            beginTensorShape,
+            endTensorShape,
+            strideTensorShape,
+            outputTensorShape,
+            beginMask,
+            endMask,
+            ellipsisMask,
+            newAxisMask,
+            shrinkAxisMask,
+            dataLayout);
+
+    auto tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create two TfLite interpreters: one to run with the ArmNN delegate attached,
+    // and one to run the reference TfLite kernels.
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use the ArmNN delegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                        armnnDelegateInterpreter,
+                                        outputTensorShape,
+                                        expectedOutputValues);
+
+    tfLiteInterpreter.reset(nullptr);
+    armnnDelegateInterpreter.reset(nullptr);
+} // End of StridedSliceTestImpl
+
+} // anonymous namespace
\ No newline at end of file
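The mask arguments are per-dimension bit fields: bit i of beginMask/endMask tells the kernel to ignore begin[i]/end[i] for axis i (the range-mask test above passes -1 to set every bit), and bit i of shrinkAxisMask removes axis i from the output. A minimal sketch with hypothetical values, not one of the tests in this patch:

    // Take row 0 of a {2, 2} input [[1, 2], [3, 4]] and drop the row axis.
    std::vector<int32_t> inputShape  { 2, 2 };
    std::vector<int32_t> beginData   { 0, 0 };
    std::vector<int32_t> endData     { 1, 2 };
    std::vector<int32_t> strideData  { 1, 1 };
    std::vector<int32_t> outputShape { 2 };    // axis 0 collapsed by the mask
    int32_t shrinkAxisMask = 1 << 0;
    // Expected output: { 1.0f, 2.0f }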
diff --git a/delegate/test/TestUtils.cpp b/delegate/test/TestUtils.cpp
new file mode 100644
index 0000000..2689c2e
--- /dev/null
+++ b/delegate/test/TestUtils.cpp
@@ -0,0 +1,152 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TestUtils.hpp"
+
+namespace armnnDelegate
+{
+
+void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize)
+{
+    auto compareBool = [](auto a, auto b) {return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));};
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        CHECK(compareBool(tensor1[i], tensor2[i]));
+    }
+}
+
+void CompareData(std::vector<bool>& tensor1, bool tensor2[], size_t tensorSize)
+{
+    auto compareBool = [](auto a, auto b) {return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));};
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        CHECK(compareBool(tensor1[i], tensor2[i]));
+    }
+}
+
+void CompareData(float tensor1[], float tensor2[], size_t tensorSize)
+{
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        CHECK(tensor1[i] == doctest::Approx(tensor2[i]));
+    }
+}
+
+void CompareData(float tensor1[], float tensor2[], size_t tensorSize, float percentTolerance)
+{
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        CHECK(std::max(tensor1[i], tensor2[i]) - std::min(tensor1[i], tensor2[i]) <=
+              std::abs(tensor1[i]*percentTolerance/100));
+    }
+}
+
+void CompareData(uint8_t tensor1[], uint8_t tensor2[], size_t tensorSize)
+{
+    uint8_t tolerance = 1;
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        CHECK(std::max(tensor1[i], tensor2[i]) - std::min(tensor1[i], tensor2[i]) <= tolerance);
+    }
+}
+
+void CompareData(int16_t tensor1[], int16_t tensor2[], size_t tensorSize)
+{
+    int16_t tolerance = 1;
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        CHECK(std::max(tensor1[i], tensor2[i]) - std::min(tensor1[i], tensor2[i]) <= tolerance);
+    }
+}
+
+void CompareData(int32_t tensor1[], int32_t tensor2[], size_t tensorSize)
+{
+    int32_t tolerance = 1;
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        CHECK(std::max(tensor1[i], tensor2[i]) - std::min(tensor1[i], tensor2[i]) <= tolerance);
+    }
+}
+
+void CompareData(int8_t tensor1[], int8_t tensor2[], size_t tensorSize)
+{
+    int8_t tolerance = 1;
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        CHECK(std::max(tensor1[i], tensor2[i]) - std::min(tensor1[i], tensor2[i]) <= tolerance);
+    }
+}
+
+void CompareData(Half tensor1[], Half tensor2[], size_t tensorSize)
+{
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        CHECK(tensor1[i] == doctest::Approx(tensor2[i]));
+    }
+}
+
+void CompareData(TfLiteFloat16 tensor1[], TfLiteFloat16 tensor2[], size_t tensorSize)
+{
+    uint16_t tolerance = 1;
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        uint16_t tensor1Data = tensor1[i].data;
+        uint16_t tensor2Data = tensor2[i].data;
+        CHECK(std::max(tensor1Data, tensor2Data) - std::min(tensor1Data, tensor2Data) <= tolerance);
+    }
+}
+
+void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize)
+{
+    uint16_t tolerance = 1;
+    for (size_t i = 0; i < tensorSize; i++)
+    {
+        uint16_t tensor1Data = tensor1[i].data;
+        uint16_t tensor2Data = half_float::detail::float2half<std::round_indeterminate, float>(tensor2[i]);
+        CHECK(std::max(tensor1Data, tensor2Data) - std::min(tensor1Data, tensor2Data) <= tolerance);
+    }
+}
+
+template <>
+void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
+                       std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
+                       std::vector<int32_t>& expectedOutputShape,
+                       std::vector<Half>& expectedOutputValues,
+                       unsigned int outputIndex)
+{
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
+    auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
+    auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<TfLiteFloat16>(armnnDelegateOutputId);
+
+    CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
+    CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
+
+    for (size_t i = 0; i < expectedOutputShape.size(); i++)
+    {
+        CHECK(armnnDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
+        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
+        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
+    }
+
+    armnnDelegate::CompareData(armnnDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+}
+
+template <>
+void FillInput<Half>(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues)
+{
+    auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
+    auto tfLiteDelegateInputData = interpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i].data = half_float::detail::float2half<std::round_indeterminate, float>(inputValues[i]);
+    }
+}
+
+} // namespace armnnDelegate
\ No newline at end of file
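The TfLiteFloat16 overloads compare raw binary16 bit patterns and allow them to differ by at most one unit in the last place. A minimal sketch of the round trip they rely on, using only calls that appear above:

    Half expected(1.5f);
    TfLiteFloat16 actual;
    actual.data = half_float::detail::float2half<std::round_indeterminate, float>(1.5f);
    armnnDelegate::CompareData(&actual, &expected, 1);    // identical bit patterns, so this passes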
diff --git a/delegate/test/TestUtils.hpp b/delegate/test/TestUtils.hpp
new file mode 100644
index 0000000..95dd257
--- /dev/null
+++ b/delegate/test/TestUtils.hpp
@@ -0,0 +1,101 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <tensorflow/lite/c/common.h>
+#include <tensorflow/lite/interpreter.h>
+
+#include <doctest/doctest.h>
+
+#include <half/half.hpp>
+
+using Half = half_float::half;
+
+namespace armnnDelegate
+{
+
+/// Can be used to assign input data from a vector to a model input.
+/// Example usage can be found in ResizeTestHelper.hpp
+template <typename T>
+void FillInput(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<T>& inputValues)
+{
+    auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
+    auto tfLiteDelegateInputData = interpreter->typed_tensor<T>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+}
+
+template <>
+void FillInput(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues);
+
+/// Can be used to compare bool data coming from a tflite interpreter.
+/// Boolean types are stored as a bit representation in a vector, so vector.data() returns a void pointer
+/// instead of a pointer to bool. A dedicated overload is therefore needed to compare against a vector of bool.
+void CompareData(std::vector<bool>& tensor1, bool tensor2[], size_t tensorSize);
+void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize);
+
+/// Can be used to compare float data coming from a tflite interpreter using doctest's default
+/// Approx tolerance (std::numeric_limits<float>::epsilon() * 100)
+void CompareData(float tensor1[], float tensor2[], size_t tensorSize);
+
+/// Can be used to compare float data coming from a tflite interpreter with a given percentage tolerance
+void CompareData(float tensor1[], float tensor2[], size_t tensorSize, float percentTolerance);
+
+/// Can be used to compare int8_t data coming from a tflite interpreter with a tolerance of 1
+void CompareData(int8_t tensor1[], int8_t tensor2[], size_t tensorSize);
+
+/// Can be used to compare uint8_t data coming from a tflite interpreter with a tolerance of 1
+void CompareData(uint8_t tensor1[], uint8_t tensor2[], size_t tensorSize);
+
+/// Can be used to compare int16_t data coming from a tflite interpreter with a tolerance of 1
+void CompareData(int16_t tensor1[], int16_t tensor2[], size_t tensorSize);
+
+/// Can be used to compare int32_t data coming from a tflite interpreter with a tolerance of 1
+void CompareData(int32_t tensor1[], int32_t tensor2[], size_t tensorSize);
+
+/// Can be used to compare Half (Float16) data using doctest's default Approx tolerance
+/// (std::numeric_limits<float>::epsilon() * 100)
+void CompareData(Half tensor1[], Half tensor2[], size_t tensorSize);
+
+/// Can be used to compare TfLiteFloat16 data coming from a tflite interpreter
+void CompareData(TfLiteFloat16 tensor1[], TfLiteFloat16 tensor2[], size_t tensorSize);
+
+/// Can be used to compare Half (Float16) data and TfLiteFloat16 data coming from a tflite interpreter
+void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize);
+
+/// Can be used to compare the output tensor shape and values
+/// from armnnDelegateInterpreter and tfLiteInterpreter.
+/// Example usage can be found in ControlTestHelper.hpp
+template <typename T>
+void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
+                       std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
+                       std::vector<int32_t>& expectedOutputShape,
+                       std::vector<T>& expectedOutputValues,
+                       unsigned int outputIndex = 0)
+{
+    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
+    auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
+    auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
+
+    CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
+    CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
+
+    for (size_t i = 0; i < expectedOutputShape.size(); i++)
+    {
+        CHECK(expectedOutputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
+        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
+        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
+    }
+
+    armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+}
+
+} // namespace armnnDelegate
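Taken together, a typical operator test helper drives these utilities as follows (a sketch assuming two interpreters already built from the same model, as in StridedSliceTestImpl above):

    armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputValues);
    armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputValues);
    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
    armnnDelegate::CompareOutputData<float>(tfLiteInterpreter, armnnDelegateInterpreter,
                                            expectedOutputShape, expectedOutputValues);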
diff --git a/delegate/test/TransposeTest.cpp b/delegate/test/TransposeTest.cpp
new file mode 100644
index 0000000..c210128
--- /dev/null
+++ b/delegate/test/TransposeTest.cpp
@@ -0,0 +1,46 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TransposeTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <doctest/doctest.h>
+#include <flatbuffers/flatbuffers.h>
+
+namespace armnnDelegate
+{
+
+TEST_SUITE ("Transpose_GpuAccTests")
+{
+
+TEST_CASE ("Transpose_Float32_GpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+    TransposeFP32Test(backends);
+}
+
+} // Transpose_GpuAccTests TestSuite
+
+TEST_SUITE ("Transpose_CpuAccTests")
+{
+
+TEST_CASE ("Transpose_Float32_CpuAcc_Test")
+{
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+    TransposeFP32Test(backends);
+}
+
+} // Transpose_CpuAccTests TestSuite
+
+TEST_SUITE ("Transpose_CpuRefTests")
+{
+TEST_CASE ("Transpose_Float32_CpuRef_Test")
+{
+        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+        TransposeFP32Test(backends);
+}
+}
+} // namespace armnnDelegate
diff --git a/delegate/test/TransposeTestHelper.hpp b/delegate/test/TransposeTestHelper.hpp
new file mode 100644
index 0000000..99bb60b
--- /dev/null
+++ b/delegate/test/TransposeTestHelper.hpp
@@ -0,0 +1,177 @@
+//
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+namespace
+{
+std::vector<char> CreateTransposeTfLiteModel(tflite::TensorType tensorType,
+                                             const std::vector<int32_t>& input0TensorShape,
+                                             const std::vector<int32_t>& inputPermVecShape,
+                                             const std::vector<int32_t>& outputTensorShape,
+                                             const std::vector<int32_t>& inputPermVec)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
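+    // Buffer 0 is the TFLite empty sentinel; buffer 2 holds the constant permutation
+    // vector, while the input (buffer 1) and output (buffer 3) stay empty.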
+    flatbuffers::Offset<tflite::Buffer> buffers[4]{
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputPermVec.data()),
+                                                        sizeof(int32_t) * inputPermVec.size())),
+            CreateBuffer(flatBufferBuilder)
+    };
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
+                                                                      input0TensorShape.size()),
+                              tensorType, 1);
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputPermVecShape.data(),
+                                                                      inputPermVecShape.size()),
+                              tflite::TensorType_INT32, 2,
+                              flatBufferBuilder.CreateString("permutation_vector"));
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType, 3);
+    const std::vector<int32_t> operatorInputs{0, 1};
+    const std::vector<int32_t> operatorOutputs{2};
+    flatbuffers::Offset <Operator> transposeOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           BuiltinOptions_TransposeOptions,
+                           CreateTransposeOptions(flatBufferBuilder).Union());
+    const std::vector<int> subgraphInputs{0, 1};
+    const std::vector<int> subgraphOutputs{2};
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&transposeOperator, 1));
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: Transpose Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
+                                                                         tflite::BuiltinOperator_TRANSPOSE);
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers, 4));
+    flatBufferBuilder.Finish(flatbufferModel);
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    using namespace tflite;
+
+    // Set test input data
+    std::vector<int32_t> input0Shape {4, 2, 3};
+    std::vector<int32_t> inputPermVecShape {3};
+    std::vector<int32_t> outputShape {3, 4, 2};    // output.shape[d] = input.shape[perm[d]]
+
+    std::vector<float> input0Values = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11,
+                                       12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23};
+    std::vector<int32_t> inputPermVec = {2, 0, 1};
+    std::vector<float> expectedOutputValues = {0, 3, 6, 9, 12, 15, 18, 21, 1, 4, 7, 10,
+                                               13, 16, 19, 22, 2, 5, 8, 11, 14, 17, 20, 23};
+
+    // create model
+    std::vector<char> modelBuffer = CreateTransposeTfLiteModel(::tflite::TensorType_FLOAT32,
+                                                               input0Shape,
+                                                               inputPermVecShape,
+                                                               outputShape,
+                                                               inputPermVec);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data for tflite
+    auto tfLiteInterpreterInput0Id = tfLiteInterpreter->inputs()[0];
+    auto tfLiteInterpreterInput0Data = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterInput0Id);
+    for (unsigned int i = 0; i < input0Values.size(); ++i)
+    {
+        tfLiteInterpreterInput0Data[i] = input0Values[i];
+    }
+
+    auto tfLiteInterpreterInput1Id = tfLiteInterpreter->inputs()[1];
+    auto tfLiteInterpreterInput1Data = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteInterpreterInput1Id);
+    for (unsigned int i = 0; i < inputPermVec.size(); ++i)
+    {
+        tfLiteInterpreterInput1Data[i] = inputPermVec[i];
+    }
+
+    // Set input data for armnn delegate
+    auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInput0Id);
+    for (unsigned int i = 0; i < input0Values.size(); ++i)
+    {
+        armnnDelegateInput0Data[i] = input0Values[i];
+    }
+
+    auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
+    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<int32_t>(armnnDelegateInput1Id);
+    for (unsigned int i = 0; i < inputPermVec.size(); ++i)
+    {
+        armnnDelegateInput1Data[i] = inputPermVec[i];
+    }
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
+    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    for (size_t i = 0; i < expectedOutputValues.size(); ++i)
+    {
+        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
+        CHECK(tfLiteInterpreterOutputData[i] == expectedOutputValues[i]);
+        CHECK(tfLiteInterpreterOutputData[i] == armnnDelegateOutputData[i]);
+    }
+
+    armnnDelegateInterpreter.reset(nullptr);
+}
+} // anonymous namespace
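TFLite's transpose convention is output.shape[d] = input.shape[perm[d]], so with perm = {2, 0, 1} the {4, 2, 3} input maps to a {3, 4, 2} output with output[j0][j1][j2] = input[j1][j2][j0]. The expected values above can be reproduced with a short sketch:

    // Recompute expectedOutputValues for input shape {4, 2, 3} and perm = {2, 0, 1}.
    std::vector<float> expected(24);
    for (int j0 = 0; j0 < 3; ++j0)          // output dim 0 <- input dim 2
        for (int j1 = 0; j1 < 4; ++j1)      // output dim 1 <- input dim 0
            for (int j2 = 0; j2 < 2; ++j2)  // output dim 2 <- input dim 1
                expected[(j0 * 4 + j1) * 2 + j2] = input0Values[(j1 * 2 + j2) * 3 + j0];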
diff --git a/delegate/test/UnidirectionalSequenceLstmTest.cpp b/delegate/test/UnidirectionalSequenceLstmTest.cpp
new file mode 100644
index 0000000..6d896d7
--- /dev/null
+++ b/delegate/test/UnidirectionalSequenceLstmTest.cpp
@@ -0,0 +1,1464 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "UnidirectionalSequenceLstmTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+void UnidirectionalSequenceLstmTest(std::vector<armnn::BackendId>& backends)
+{
+    int32_t batchSize = 3;
+    int32_t timeSize = 2;
+    int32_t inputSize = 3;
+    int32_t outputSize = 4;
+    // The cell size and output size are equal when there is no projection layer.
+    int32_t numUnits = outputSize;
+
+    // tensorInfo12: {numUnits, inputSize} input-to-gate weights.
+    // Providing input-to-input weights means CIFG (coupled input-forget gate) is disabled.
+    bool hasInputToInputWeights = true;
+    std::vector<float> inputToInputWeights = { -0.49536117f, -0.0556083915f, -0.102400711f,
+                                               -0.117484632f, 0.3298470976f, -0.1179017122f,
+                                               0.214305695f, 0.42135173085f, 0.003878414626f,
+                                               -0.348303917f, -0.1881275477f, 0.0343011027f };
+
+    std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
+                                                -0.3810434485f, 0.268383264f, -0.009807467424f,
+                                                -0.3522925403f, -0.24275735512f, -0.28344226125f,
+                                                0.13512269116f, -0.4932442977f, -0.10039821991f };
+
+    std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
+                                              0.386399507f, -0.259465157985f, -0.16545993089f,
+                                              -0.4230232555f, 0.341664791103f, -0.18127849691f,
+                                              -0.2277662414f, -0.55275535589f, 0.34184026718f };
+
+    std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
+                                                0.53969591851f, 0.23393625035f, -0.27140527306f,
+                                                0.50009280443f, 0.07511717046f, 0.3998299249f,
+                                                -0.51717478049f, 0.1889653282f, -0.367323637f };
+
+    // tensorInfo16: {numUnits, outputSize} recurrent-to-gate weights.
+    bool hasRecurrentToInputWeights = true;
+    std::vector<float> recurrentToInputWeights = { -0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
+                                                   -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
+                                                   0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
+                                                   0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f };
+
+    std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
+                                                    -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
+                                                    -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
+                                                    -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f };
+
+    std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
+                                                  -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
+                                                  0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
+                                                  0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f };
+
+    std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
+                                                    -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
+                                                    0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
+                                                    -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f };
+    // tensorInfo4: {numUnits} peephole weights (unused in this test).
+    bool hasCellToInputWeights = false;
+    std::vector<float> cellToInputWeights;
+    bool hasCellToForgetWeights = false;
+    std::vector<float> cellToForgetWeights;
+    bool hasCellToOutputWeights = false;
+    std::vector<float> cellToOutputWeights;
+
+    bool hasInputGateBias = true;
+    std::vector<float> inputGateBias = {0., 0., 0., 0.};
+    std::vector<float> forgetGateBias = {1., 1., 1., 1.};
+    std::vector<float> cellBias = {0., 0., 0., 0.};
+    std::vector<float> outputGateBias = {0., 0., 0., 0.};
+
+    bool hasProjectionWeights = false;
+    std::vector<float> projectionWeights;
+    bool hasProjectionBias = false;
+    std::vector<float> projectionBias;
+
+    bool hasInputLayerNormWeights = false;
+    std::vector<float> inputLayerNormWeights;
+    bool hasForgetLayerNormWeights = false;
+    std::vector<float> forgetLayerNormWeights;
+    bool hasCellLayerNormWeights = false;
+    std::vector<float> cellLayerNormWeights;
+    bool hasOutputLayerNormWeights = false;
+    std::vector<float> outputLayerNormWeights;
+
+    std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
+                                       3., 2., 1., 2., 3., 4.,
+                                       5., 4., 3., 2., 1., 2. };
+    std::vector<float> expectedOutputValues = { -0.0714901f, -0.162117f, -0.175168f, -0.0232934f,
+                                                -0.168107f, -0.414129f, -0.549875f, -0.00803579f,
+                                                -0.0668735f, 0.204078f, -0.42765f, -0.0312321f,
+                                                -0.120003f, -0.0941918f, -0.456391f, -0.0287019f,
+                                                -0.0342921f, 0.20824f, -0.656989f, -0.00415265f,
+                                                -0.10493f, 0.14211f, -0.583478f, -0.0329754f };
+
+    tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+    float clippingThresCell = 10.f;
+    float clippingThresProj = 0.f;
+    bool isTimeMajor = false;
+
+    UnidirectionalSequenceLstmTestImpl<float>(backends,
+                                              ::tflite::TensorType_FLOAT32,
+                                              batchSize,
+                                              timeSize,
+                                              inputSize,
+                                              outputSize,
+                                              numUnits,
+                                              hasInputToInputWeights,
+                                              inputToInputWeights,
+                                              inputToForgetWeights,
+                                              inputToCellWeights,
+                                              inputToOutputWeights,
+                                              hasRecurrentToInputWeights,
+                                              recurrentToInputWeights,
+                                              recurrentToForgetWeights,
+                                              recurrentToCellWeights,
+                                              recurrentToOutputWeights,
+                                              hasCellToInputWeights,
+                                              cellToInputWeights,
+                                              hasCellToForgetWeights,
+                                              cellToForgetWeights,
+                                              hasCellToOutputWeights,
+                                              cellToOutputWeights,
+                                              hasInputGateBias,
+                                              inputGateBias,
+                                              forgetGateBias,
+                                              cellBias,
+                                              outputGateBias,
+                                              hasProjectionWeights,
+                                              projectionWeights,
+                                              hasProjectionBias,
+                                              projectionBias,
+                                              hasInputLayerNormWeights,
+                                              inputLayerNormWeights,
+                                              hasForgetLayerNormWeights,
+                                              forgetLayerNormWeights,
+                                              hasCellLayerNormWeights,
+                                              cellLayerNormWeights,
+                                              hasOutputLayerNormWeights,
+                                              outputLayerNormWeights,
+                                              inputValues,
+                                              expectedOutputValues,
+                                              activationFunction,
+                                              clippingThresCell,
+                                              clippingThresProj,
+                                              isTimeMajor);
+}
+
+void UnidirectionalSequenceLstmTimeMajorTest(std::vector<armnn::BackendId>& backends)
+{
+    int32_t batchSize = 3;
+    int32_t timeSize = 2;
+    int32_t inputSize = 3;
+    int32_t outputSize = 4;
+    // The cell size and output size are equal when there is no projection layer.
+    int32_t numUnits = outputSize;
+
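+    // These shape vectors are documentation only: the helper derives all tensor shapes
+    // from the scalar dimensions above, with {timeSize, batchSize, inputSize} ordering
+    // when isTimeMajor is true.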
+    std::vector<int32_t> inputShape = {timeSize, batchSize, inputSize};
+    std::vector<int32_t> cellStateInTensorInfo = {batchSize, numUnits};
+    std::vector<int32_t> outputStateInTensorInfo = {batchSize, outputSize};
+
+    std::vector<int32_t> outputTensorInfo = {timeSize, batchSize, outputSize};
+
+    // tensorInfo12: {numUnits, inputSize} input-to-gate weights.
+    bool hasInputToInputWeights = true;
+    std::vector<float> inputToInputWeights = { 0.27277296781539917f, 0.3813590407371521f, -0.394489049911499f,
+                                               0.2782636880874634f, -0.3793870210647583f, -0.018918335437774658f,
+                                               0.2724653482437134f, -0.19314253330230713f, -0.2947450876235962f,
+                                               -0.30253493785858154f, 0.4241350293159485f, -0.22560018301010132f };
+
+    std::vector<float> inputToForgetWeights = { -0.2667974531650543f, -0.05505800247192383f, -0.20932340621948242f,
+                                                -0.14345619082450867f, 0.09666192531585693f, -0.2604355812072754f,
+                                                -0.2681812047958374f, -0.3314584493637085f, 0.4485899806022644f,
+                                                -0.23467743396759033f, 0.5072842240333557f, -0.4192768931388855f };
+
+    std::vector<float> inputToCellWeights = { -0.15782442688941956f, -0.027530014514923096f, 0.4789854884147644f,
+                                              0.23227906227111816f, 0.28259342908859253f, -0.030095696449279785f,
+                                              0.10071521997451782f, -0.08535495400428772f, 0.18563997745513916f,
+                                              -0.3049069046974182f, -0.478048175573349f, 0.025234103202819824f };
+
+    std::vector<float> inputToOutputWeights = { -0.04584759473800659f, -0.2716066539287567f, 0.012970447540283203f,
+                                                -0.4729190170764923f, -0.37422770261764526f, 0.49352723360061646f,
+                                                0.3163864016532898f, -0.436781644821167f, -0.33074596524238586f,
+                                                -0.32885751128196716f, -0.40959352254867554f, -0.2124689817428589f };
+
+    // tensorInfo16: {numUnits, outputSize} recurrent-to-gate weights.
+    bool hasRecurrentToInputWeights = true;
+    std::vector<float> recurrentToInputWeights = { 0.23788475990f, -0.24948765337f, 0.50044941902f, 0.14431896805f,
+                                                   -0.115940228137f, -0.717082679f, -0.17208620906f, 0.17850610617f,
+                                                   -0.16702319684f, -0.11384502053f, -0.309785276245f, -0.3316611672f,
+                                                   0.52380162477f, -0.06839632987f, -0.391478359627f, -0.10756178963f };
+
+    std::vector<float> recurrentToForgetWeights = { 0.11383482068f, 0.1676601767f, -0.08550968004f, 0.03399394089f,
+                                                    0.08042152225f, -0.2133381964f, 0.05182432704f, 0.38161808255f,
+                                                    -0.5018365979f, -0.08043262364f, 0.07894329014f, -0.07547105155f,
+                                                    0.12047368288f, 0.2986997961f, 0.0485043078f, -0.13372567296f };
+
+    std::vector<float> recurrentToCellWeights = { 0.0433832928545f, 0.07587072294f, -0.120520234107f, 0.604576051f,
+                                                  -0.434353142986f, 0.009314475068f, 0.005085289478f, 0.08488202038f,
+                                                  -0.00025437487886f, 0.15245915082f, -0.1936587542f, 0.004754020f,
+                                                  -0.1582719236f, 0.3307867646f, 0.0236605107784f, 0.307716339826f };
+
+    std::vector<float> recurrentToOutputWeights = { -0.079031050201f, 0.041414566286f, -0.583727357285f, 0.1025384515f,
+                                                    -0.172372072937f, 0.09214124082f, 0.178184121827f, -0.2439443916f,
+                                                    0.104485116899f, 0.2600405514f, 0.064414866268f, 0.24141204357f,
+                                                    0.281875759363f, -0.14234502664f, 0.15126448862f, -0.24421440064f };
+    // tensorInfo4: {numUnits} peephole weights (unused in this test).
+    bool hasCellToInputWeights = false;
+    std::vector<float> cellToInputWeights;
+    bool hasCellToForgetWeights = false;
+    std::vector<float> cellToForgetWeights;
+    bool hasCellToOutputWeights = false;
+    std::vector<float> cellToOutputWeights;
+
+    bool hasInputGateBias = true;
+    std::vector<float> inputGateBias = {0., 0., 0., 0.};
+    std::vector<float> forgetGateBias = {1., 1., 1., 1.};
+    std::vector<float> cellBias = {0., 0., 0., 0.};
+    std::vector<float> outputGateBias = {0., 0., 0., 0.};
+
+    bool hasProjectionWeights = false;
+    std::vector<float> projectionWeights;
+    bool hasProjectionBias = false;
+    std::vector<float> projectionBias;
+
+    bool hasInputLayerNormWeights = false;
+    std::vector<float> inputLayerNormWeights;
+    bool hasForgetLayerNormWeights = false;
+    std::vector<float> forgetLayerNormWeights;
+    bool hasCellLayerNormWeights = false;
+    std::vector<float> cellLayerNormWeights;
+    bool hasOutputLayerNormWeights = false;
+    std::vector<float> outputLayerNormWeights;
+
+    std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
+                                       3., 2., 1., 2., 3., 4.,
+                                       5., 4., 3., 2., 1., 2. };
+    std::vector<float> expectedOutputValues = { 0.135658f, 0.124673f, 0.021209f, -0.0530204f,
+                                                0.106138f, 0.0404792f, 0.0151644f, -0.00675166f,
+                                                -0.0128514f, 0.0644884f, 0.0709072f, -0.0454045f,
+                                                0.162886f, 0.166494f, 0.0277046f, -0.0369807f,
+                                                0.111716f, 0.043119f, 0.0762981f, -0.0122854f,
+                                                0.104397f, 0.2144f, 0.119192f, -0.0839058f };
+
+    tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+    float clippingThresCell = 10.f;
+    float clippingThresProj = 0.f;
+    bool isTimeMajor = true;
+
+    UnidirectionalSequenceLstmTestImpl<float>(backends,
+                                              ::tflite::TensorType_FLOAT32,
+                                              batchSize,
+                                              timeSize,
+                                              inputSize,
+                                              outputSize,
+                                              numUnits,
+                                              hasInputToInputWeights,
+                                              inputToInputWeights,
+                                              inputToForgetWeights,
+                                              inputToCellWeights,
+                                              inputToOutputWeights,
+                                              hasRecurrentToInputWeights,
+                                              recurrentToInputWeights,
+                                              recurrentToForgetWeights,
+                                              recurrentToCellWeights,
+                                              recurrentToOutputWeights,
+                                              hasCellToInputWeights,
+                                              cellToInputWeights,
+                                              hasCellToForgetWeights,
+                                              cellToForgetWeights,
+                                              hasCellToOutputWeights,
+                                              cellToOutputWeights,
+                                              hasInputGateBias,
+                                              inputGateBias,
+                                              forgetGateBias,
+                                              cellBias,
+                                              outputGateBias,
+                                              hasProjectionWeights,
+                                              projectionWeights,
+                                              hasProjectionBias,
+                                              projectionBias,
+                                              hasInputLayerNormWeights,
+                                              inputLayerNormWeights,
+                                              hasForgetLayerNormWeights,
+                                              forgetLayerNormWeights,
+                                              hasCellLayerNormWeights,
+                                              cellLayerNormWeights,
+                                              hasOutputLayerNormWeights,
+                                              outputLayerNormWeights,
+                                              inputValues,
+                                              expectedOutputValues,
+                                              activationFunction,
+                                              clippingThresCell,
+                                              clippingThresProj,
+                                              isTimeMajor);
+}
+
+void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(std::vector<armnn::BackendId>& backends)
+{
+    int32_t batchSize = 2;
+    int32_t timeSize = 3;
+    int32_t inputSize = 4;
+    int32_t outputSize = 5;
+    int32_t numUnits = 6;
+
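+    // As above, these shape vectors document the expected tensor layouts; the helper
+    // derives the actual shapes from the scalar dimensions.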
+    std::vector<int32_t> inputShape = {batchSize, timeSize, inputSize};
+    std::vector<int32_t> cellStateInTensorInfo = {batchSize, numUnits};
+    std::vector<int32_t> outputStateInTensorInfo = {batchSize, outputSize};
+
+    std::vector<int32_t> outputTensorInfo = {batchSize, timeSize, outputSize};
+
+    // tensorInfoInputSize: {numUnits, inputSize} input-to-gate weights.
+    bool hasInputToInputWeights = true;
+    std::vector<float> inputToInputWeights = { 0.021393683f, 0.06124551f, 0.046905167f, -0.014657677f,
+                                               -0.03149463f, 0.09171803f, 0.14647801f, 0.10797193f,
+                                               -0.0057968358f, 0.0019193048f, -0.2726754f, 0.10154029f,
+                                               -0.018539885f, 0.080349885f, -0.10262385f, -0.022599787f,
+                                               -0.09121155f, -0.008675967f, -0.045206103f, -0.0821282f,
+                                               -0.008045952f, 0.015478081f, 0.055217247f, 0.038719587f };
+
+    std::vector<float> inputToForgetWeights = { -0.0018401089f, -0.004852237f, 0.03698424f, 0.014181704f,
+                                                0.028273236f, -0.016726194f, -0.05249759f, -0.10204261f,
+                                                0.00861066f, -0.040979505f, -0.009899187f, 0.01923892f,
+                                                -0.028177269f, -0.08535103f, -0.14585495f, 0.10662567f,
+                                                -0.01909731f, -0.017883534f, -0.0047269356f, -0.045103323f,
+                                                0.0030784295f, 0.076784775f, 0.07463696f, 0.094531395f};
+
+    std::vector<float> inputToCellWeights = { -0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
+                                              -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
+                                              -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
+                                              -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
+                                              -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
+                                              0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f };
+
+    std::vector<float> inputToOutputWeights = { -0.0998932f, -0.07201956f, -0.052803773f, -0.15629593f,
+                                                -0.15001918f, -0.07650751f, 0.02359855f, -0.075155355f,
+                                                -0.08037709f, -0.15093534f, 0.029517552f, -0.04751393f,
+                                                0.010350531f, -0.02664851f, -0.016839722f, -0.023121163f,
+                                                0.0077019283f, 0.012851257f, -0.05040649f, -0.0129761f,
+                                                -0.021737747f, -0.038305793f, -0.06870586f, -0.01481247f };
+
+    // tensorInfoOutputSize: {numUnits, outputSize} recurrent-to-gate weights.
+    bool hasRecurrentToInputWeights = true;
+    std::vector<float> recurrentToInputWeights = { -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
+                                                   -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
+                                                   -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
+                                                   -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
+                                                   0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f,
+                                                   0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
+                                                   -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
+                                                   0.14283475f, -0.07390571f };
+
+    std::vector<float> recurrentToForgetWeights = { -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
+                                                    0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
+                                                    -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
+                                                    0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
+                                                    0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f,
+                                                    -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
+                                                    -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
+                                                    0.061878487f, -0.04729229f };
+
+    std::vector<float> recurrentToCellWeights = { -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
+                                                  0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
+                                                  0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
+                                                  -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
+                                                  0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
+                                                  0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
+                                                  -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
+                                                  -0.019443132f, -0.030755889f };
+
+    std::vector<float> recurrentToOutputWeights = { 0.025825322f, -0.05813119f, 0.09495884f,
+                                                    -0.045984812f, -0.01255415f, -0.0026479573f,
+                                                    -0.08196161f, -0.054914974f, -0.0046604523f,
+                                                    -0.029587349f, -0.044576716f, -0.07480124f,
+                                                    -0.082868785f, 0.023254942f, 0.027502948f,
+                                                    -0.0039728214f, -0.08683098f, -0.08116779f,
+                                                    -0.014675607f, -0.037924774f, -0.023314456f,
+                                                    -0.007401714f, -0.09255757f, 0.029460307f,
+                                                    -0.08829125f, -0.005139627f, -0.08989442f,
+                                                    -0.0555066f, 0.13596267f, 0.025062224f };
+    // tensorInfoNumUnits
+    bool hasCellToInputWeights = true;
+    std::vector<float> cellToInputWeights = { 0.040369894f, 0.030746894f, 0.24704495f,
+                                              0.018586371f, -0.037586458f, -0.15312155f };
+    bool hasCellToForgetWeights = true;
+    std::vector<float> cellToForgetWeights = { -0.01998659f, -0.15568835f, -0.24248174f,
+                                               -0.012770197f, 0.041331276f, -0.072311886f };
+    bool hasCellToOutputWeights = true;
+    std::vector<float> cellToOutputWeights = { 0.08286371f, -0.08261836f, -0.51210177f,
+                                               0.002913762f, 0.17764764f, -0.5495371f };
+
+    bool hasInputGateBias = true;
+    std::vector<float> inputGateBias = { 0.02234832f, 0.14757581f, 0.18176508f,
+                                         0.10380666f, 0.053110216f, -0.06928846f };
+    std::vector<float> forgetGateBias = { 0.035185695f, -0.042891346f, -0.03032477f,
+                                          0.23027696f, 0.11098921f, 0.08989442f };
+    std::vector<float> cellBias = { -0.024379363f, 0.0055531194f, 0.23377132f,
+                                    0.033463873f, -0.1483596f, 0.029460307f };
+    std::vector<float> outputGateBias = { 0.046159424f, -0.0012809046f, 0.03563469f,
+                                          0.12648113f, 0.027195795f, 0.35373217f };
+
+    bool hasProjectionWeights = true;
+    std::vector<float> projectionWeights = { -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
+                                             0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
+                                             -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
+                                             -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
+                                             0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
+                                             0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f };
+
+    bool hasProjectionBias = true;
+    std::vector<float> projectionBias(outputSize, 0.f);
+
+    bool hasInputLayerNormWeights = false;
+    std::vector<float> inputLayerNormWeights;
+    bool hasForgetLayerNormWeights = false;
+    std::vector<float> forgetLayerNormWeights;
+    bool hasCellLayerNormWeights = false;
+    std::vector<float> cellLayerNormWeights;
+    bool hasOutputLayerNormWeights = false;
+    std::vector<float> outputLayerNormWeights;
+
+    std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
+                                       3., 2., 1., 2., 3., 4.,
+                                       5., 4., 3., 2., 1., 2.,
+                                       1., 2., 3., 4., 5., 4. };
+    std::vector<float> expectedOutputValues = { -0.0135612f, -0.0263441f, 0.0314008f, -0.00883455f, 0.00763052f,
+                                                -0.00126877f, -0.0292959f, 0.0449957f, -0.00976195f, -0.00492338f,
+                                                -0.0175702f, -0.0431753f, 0.0597117f, -0.0169154f, 0.0142087f,
+                                                0.00472515f, -0.0196355f, 0.0342524f, -0.00407936f, -0.0253189f,
+                                                -0.00512944f, -0.0293754f, 0.0512771f, -0.0151874f, -0.0246433f,
+                                                -0.00744986f, -0.0345103f, 0.0450666f, -0.00944991f, 0.0126895f };
+
+    tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
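+    // clippingThresCell bounds the cell state and clippingThresProj bounds the
+    // projected output; a threshold of 0.f disables clipping for that tensor.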
+    float clippingThresCell = 10.f;
+    float clippingThresProj = 0.f;
+    bool isTimeMajor = false;
+
+    UnidirectionalSequenceLstmTestImpl<float>(backends,
+                                              ::tflite::TensorType_FLOAT32,
+                                              batchSize,
+                                              timeSize,
+                                              inputSize,
+                                              outputSize,
+                                              numUnits,
+                                              hasInputToInputWeights,
+                                              inputToInputWeights,
+                                              inputToForgetWeights,
+                                              inputToCellWeights,
+                                              inputToOutputWeights,
+                                              hasRecurrentToInputWeights,
+                                              recurrentToInputWeights,
+                                              recurrentToForgetWeights,
+                                              recurrentToCellWeights,
+                                              recurrentToOutputWeights,
+                                              hasCellToInputWeights,
+                                              cellToInputWeights,
+                                              hasCellToForgetWeights,
+                                              cellToForgetWeights,
+                                              hasCellToOutputWeights,
+                                              cellToOutputWeights,
+                                              hasInputGateBias,
+                                              inputGateBias,
+                                              forgetGateBias,
+                                              cellBias,
+                                              outputGateBias,
+                                              hasProjectionWeights,
+                                              projectionWeights,
+                                              hasProjectionBias,
+                                              projectionBias,
+                                              hasInputLayerNormWeights,
+                                              inputLayerNormWeights,
+                                              hasForgetLayerNormWeights,
+                                              forgetLayerNormWeights,
+                                              hasCellLayerNormWeights,
+                                              cellLayerNormWeights,
+                                              hasOutputLayerNormWeights,
+                                              outputLayerNormWeights,
+                                              inputValues,
+                                              expectedOutputValues,
+                                              activationFunction,
+                                              clippingThresCell,
+                                              clippingThresProj,
+                                              isTimeMajor);
+}
+
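+// With CIFG (Coupled Input and Forget Gate) the input gate is not learned
+// separately but derived from the forget gate, effectively i_t = 1 - f_t,
+// so no input-to-input weights, recurrent-to-input weights or input gate
+// bias are supplied in this test.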
+void UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(std::vector<armnn::BackendId>& backends)
+{
+    int32_t batchSize = 3;
+    int32_t timeSize = 2;
+    int32_t inputSize = 3;
+    int32_t outputSize = 4;
+    // cellSize and outputSize have the same size when there is no projection.
+    int32_t numUnits = outputSize;
+
+    // tensorInfo12
+    bool hasInputToInputWeights = false;
+    std::vector<float> inputToInputWeights{};
+
+    std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
+                                                -0.3810434485f, 0.268383264f, -0.009807467424f,
+                                                -0.3522925403f, -0.24275735512f, -0.28344226125f,
+                                                0.13512269116f, -0.4932442977f, -0.10039821991f };
+
+    std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
+                                              0.386399507f, -0.259465157985f, -0.16545993089f,
+                                              -0.4230232555f, 0.341664791103f, -0.18127849691f,
+                                              -0.2277662414f, -0.55275535589f, 0.34184026718f };
+
+    std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
+                                                0.53969591851f, 0.23393625035f, -0.27140527306f,
+                                                0.50009280443f, 0.07511717046f, 0.3998299249f,
+                                                -0.51717478049f, 0.1889653282f, -0.367323637f };
+
+    // tensorInfo16
+    bool hasRecurrentToInputWeights = false;
+    std::vector<float> recurrentToInputWeights{};
+
+    std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
+                                                    -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
+                                                    -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
+                                                    -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f };
+
+    std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
+                                                  -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
+                                                  0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
+                                                  0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f };
+
+    std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
+                                                    -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
+                                                    0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
+                                                    -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f };
+    // tensorInfo4
+    bool hasCellToInputWeights = false;
+    std::vector<float> cellToInputWeights;
+    bool hasCellToForgetWeights = true;
+    std::vector<float> cellToForgetWeights = { 0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f };
+    bool hasCellToOutputWeights = true;
+    std::vector<float> cellToOutputWeights = { -0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f };
+
+    bool hasInputGateBias = false;
+    std::vector<float> inputGateBias;
+    std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
+    std::vector<float> cellBias = { 0., 0., 0., 0. };
+    std::vector<float> outputGateBias = { 0., 0., 0., 0. };
+
+    bool hasProjectionWeights = false;
+    std::vector<float> projectionWeights;
+    bool hasProjectionBias = false;
+    std::vector<float> projectionBias;
+
+    bool hasInputLayerNormWeights = false;
+    std::vector<float> inputLayerNormWeights;
+    bool hasForgetLayerNormWeights = false;
+    std::vector<float> forgetLayerNormWeights;
+    bool hasCellLayerNormWeights = false;
+    std::vector<float> cellLayerNormWeights;
+    bool hasOutputLayerNormWeights = false;
+    std::vector<float> outputLayerNormWeights;
+
+    std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
+                                       3., 2., 1., 2., 3., 4.,
+                                       5., 4., 3., 2., 1., 2. };
+    std::vector<float> expectedOutputValues =  { -0.0129257f, -0.070531f, -0.153508f, -0.0392391f,
+                                                 -0.0300169f, -0.195717f, -0.528679f, -0.0818106f,
+                                                 -0.0332748f, 0.155429f, -0.353966f, -0.0801505f,
+                                                 -0.032312f, -0.0407911f, -0.435053f, -0.0932317f,
+                                                 -0.0108233f, 0.165584f, -0.640424f, -0.0447535f,
+                                                 -0.031675f, 0.125987f, -0.526695f, -0.110093f };
+
+    tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+    float clippingThresCell = 10.f;
+    float clippingThresProj = 0.f;
+    bool isTimeMajor = false;
+
+    UnidirectionalSequenceLstmTestImpl<float>(backends,
+                                              ::tflite::TensorType_FLOAT32,
+                                              batchSize,
+                                              timeSize,
+                                              inputSize,
+                                              outputSize,
+                                              numUnits,
+                                              hasInputToInputWeights,
+                                              inputToInputWeights,
+                                              inputToForgetWeights,
+                                              inputToCellWeights,
+                                              inputToOutputWeights,
+                                              hasRecurrentToInputWeights,
+                                              recurrentToInputWeights,
+                                              recurrentToForgetWeights,
+                                              recurrentToCellWeights,
+                                              recurrentToOutputWeights,
+                                              hasCellToInputWeights,
+                                              cellToInputWeights,
+                                              hasCellToForgetWeights,
+                                              cellToForgetWeights,
+                                              hasCellToOutputWeights,
+                                              cellToOutputWeights,
+                                              hasInputGateBias,
+                                              inputGateBias,
+                                              forgetGateBias,
+                                              cellBias,
+                                              outputGateBias,
+                                              hasProjectionWeights,
+                                              projectionWeights,
+                                              hasProjectionBias,
+                                              projectionBias,
+                                              hasInputLayerNormWeights,
+                                              inputLayerNormWeights,
+                                              hasForgetLayerNormWeights,
+                                              forgetLayerNormWeights,
+                                              hasCellLayerNormWeights,
+                                              cellLayerNormWeights,
+                                              hasOutputLayerNormWeights,
+                                              outputLayerNormWeights,
+                                              inputValues,
+                                              expectedOutputValues,
+                                              activationFunction,
+                                              clippingThresCell,
+                                              clippingThresProj,
+                                              isTimeMajor);
+}
+
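+// Here numUnits (5) differs from outputSize (4): the projection weights of
+// shape {outputSize, numUnits} map the cell output back down to outputSize,
+// and each per-gate layer normalisation vector has numUnits elements that
+// scale the normalised pre-activation of its gate.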
+void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest(
+    std::vector<armnn::BackendId>& backends)
+{
+    int32_t batchSize = 3;
+    int32_t timeSize = 2;
+    int32_t inputSize = 3;
+    int32_t outputSize = 4;
+    int32_t numUnits = 5;
+
+    // tensorInfo15
+    bool hasInputToInputWeights = true;
+    std::vector<float> inputToInputWeights = { -0.49536117f, -0.0556083915f, -0.102400711f,
+                                               -0.117484632f, 0.3298470976f, -0.1179017122f,
+                                               0.214305695f, 0.42135173085f, 0.003878414626f,
+                                               -0.348303917f, -0.1881275477f, 0.0343011027f,
+                                               -0.38837709614f, -0.05636804124f, 0.4259087456f };
+
+    std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
+                                                -0.3810434485f, 0.268383264f, -0.009807467424f,
+                                                -0.3522925403f, -0.24275735512f, -0.28344226125f,
+                                                0.13512269116f, -0.4932442977f, -0.10039821991f,
+                                                0.2726137042f, 0.09216640889f, -0.06551410215f };
+
+    std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
+                                              0.386399507f, -0.259465157985f, -0.16545993089f,
+                                              -0.4230232555f, 0.341664791103f, -0.18127849691f,
+                                              -0.2277662414f, -0.55275535589f, 0.34184026718f,
+                                              0.3954237699f, -0.19407111404f, 0.30412107706f };
+
+    std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
+                                                0.53969591851f, 0.23393625035f, -0.27140527306f,
+                                                0.50009280443f, 0.07511717046f, 0.3998299249f,
+                                                -0.51717478049f, 0.1889653282f, -0.367323637f,
+                                                -0.12584099173f, -0.12319286912f, 0.2407919466f };
+
+    // tensorInfo20
+    bool hasRecurrentToInputWeights = true;
+    std::vector<float> recurrentToInputWeights = { -0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
+                                                   -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
+                                                   0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
+                                                   0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f,
+                                                   0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f };
+
+    std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
+                                                    -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
+                                                    -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
+                                                    -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f,
+                                                    0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f };
+
+    std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
+                                                  -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
+                                                  0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
+                                                  0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f,
+                                                  0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f };
+
+    std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
+                                                    -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
+                                                    0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
+                                                    -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f,
+                                                    0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f };
+    // tensorInfo5
+    bool hasCellToInputWeights = true;
+    std::vector<float> cellToInputWeights = { 0.05f, 0.1f, 0.25f, 0.15f, -0.02f };
+    bool hasCellToForgetWeights = true;
+    std::vector<float> cellToForgetWeights = { -0.02f, -0.15f, -0.25f, -0.03f, 0.15f };
+    bool hasCellToOutputWeights = true;
+    std::vector<float> cellToOutputWeights = { 0.1f, -0.1f, -0.5f, 0.05f, 0.01f };
+
+    bool hasInputGateBias = true;
+    std::vector<float> inputGateBias = { 0.03f, 0.15f, 0.22f, 0.38f, 0.05f };
+    std::vector<float> forgetGateBias = { 0.1f, -0.3f, -0.2f, 0.1f, 0.4f };
+    std::vector<float> cellBias = { -0.05f, 0.72f, 0.25f, 0.08f, 0.1f };
+    std::vector<float> outputGateBias = { 0.05f, -0.01f, 0.2f, 0.1f, -0.2f };
+
+    bool hasProjectionWeights = true;
+    std::vector<float> projectionWeights = { -0.1f, 0.2f, 0.01f, -0.2f,
+                                             0.1f, 0.5f,  0.3f, 0.08f,
+                                             0.07f, 0.2f, -0.4f,  0.2f,
+                                             0.5f, -0.4f, 0.3f, -0.2f,
+                                             0.3f, 0.08f, -0.07f, 0.2f }; // {outputSize, numUnits}
+    bool hasProjectionBias = true;
+    std::vector<float> projectionBias(outputSize, 0.f);
+
+    bool hasInputLayerNormWeights = true;
+    std::vector<float> inputLayerNormWeights = { 0.1f, 0.2f, 0.3f, 0.5f, 0.8f };
+    bool hasForgetLayerNormWeights = true;
+    std::vector<float> forgetLayerNormWeights = { 0.1f, 0.2f, 0.3f, 0.5f, 0.2f };
+    bool hasCellLayerNormWeights = true;
+    std::vector<float> cellLayerNormWeights = { 0.7f, 0.2f, 0.3f, 0.8f, 0.5f };
+    bool hasOutputLayerNormWeights = true;
+    std::vector<float> outputLayerNormWeights = { 0.6f, 0.2f, 0.2f, 0.5f, 0.1f };
+
+    std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
+                                       3., 2., 1., 2., 3., 4.,
+                                       5., 4., 3., 2., 1., 2. };
+    std::vector<float> expectedOutputValues = { 0.0642256f, 0.0343966f, 0.184122f, 0.114717f,
+                                                0.11458f, 0.0407109f, 0.300327f, 0.174301f,
+                                                0.0864761f, 0.0362912f, 0.178635f, 0.115689f,
+                                                0.108008f, 0.0386623f, 0.273471f, 0.167115f,
+                                                0.0859545f, 0.0331481f, 0.186051f, 0.11888f,
+                                                0.106649f, 0.0276847f, 0.229863f, 0.166958f };
+
+    tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+    float clippingThresCell = 10.f;
+    float clippingThresProj = 0.f;
+    bool isTimeMajor = false;
+
+    UnidirectionalSequenceLstmTestImpl<float>(backends,
+                                              ::tflite::TensorType_FLOAT32,
+                                              batchSize,
+                                              timeSize,
+                                              inputSize,
+                                              outputSize,
+                                              numUnits,
+                                              hasInputToInputWeights,
+                                              inputToInputWeights,
+                                              inputToForgetWeights,
+                                              inputToCellWeights,
+                                              inputToOutputWeights,
+                                              hasRecurrentToInputWeights,
+                                              recurrentToInputWeights,
+                                              recurrentToForgetWeights,
+                                              recurrentToCellWeights,
+                                              recurrentToOutputWeights,
+                                              hasCellToInputWeights,
+                                              cellToInputWeights,
+                                              hasCellToForgetWeights,
+                                              cellToForgetWeights,
+                                              hasCellToOutputWeights,
+                                              cellToOutputWeights,
+                                              hasInputGateBias,
+                                              inputGateBias,
+                                              forgetGateBias,
+                                              cellBias,
+                                              outputGateBias,
+                                              hasProjectionWeights,
+                                              projectionWeights,
+                                              hasProjectionBias,
+                                              projectionBias,
+                                              hasInputLayerNormWeights,
+                                              inputLayerNormWeights,
+                                              hasForgetLayerNormWeights,
+                                              forgetLayerNormWeights,
+                                              hasCellLayerNormWeights,
+                                              cellLayerNormWeights,
+                                              hasOutputLayerNormWeights,
+                                              outputLayerNormWeights,
+                                              inputValues,
+                                              expectedOutputValues,
+                                              activationFunction,
+                                              clippingThresCell,
+                                              clippingThresProj,
+                                              isTimeMajor);
+}
+
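+// Same topology as the float tests above but with int8 weights. The extra
+// trailing argument (0.1f) forwarded to the helper is presumably the weight
+// quantisation scale, so a stored int8 value of -4 represents roughly -0.4f.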
+void UnidirectionalSequenceLstmInt8Test(std::vector<armnn::BackendId>& backends)
+{
+    int32_t batchSize = 3;
+    int32_t timeSize = 2;
+    int32_t inputSize = 3;
+    int32_t outputSize = 4;
+    // cellSize and outputSize have the same size when there is no projection.
+    int32_t numUnits = outputSize;
+
+    // tensorInfo12
+    bool hasInputToInputWeights = true;
+    std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
+
+    std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
+
+    std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
+
+    std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
+
+    // tensorInfo16
+    bool hasRecurrentToInputWeights = true;
+    std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
+
+    std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
+
+    std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
+
+    std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
+
+    // tensorInfo4
+    bool hasCellToInputWeights = false;
+    std::vector<int8_t> cellToInputWeights;
+    bool hasCellToForgetWeights = false;
+    std::vector<int8_t> cellToForgetWeights;
+    bool hasCellToOutputWeights = false;
+    std::vector<int8_t> cellToOutputWeights;
+
+    bool hasInputGateBias = true;
+    std::vector<float> inputGateBias = { 0., 0., 0., 0. };
+    std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
+    std::vector<float> cellBias = { 0., 0., 0., 0. };
+    std::vector<float> outputGateBias = { 0., 0., 0., 0. };
+
+    bool hasProjectionWeights = false;
+    std::vector<int8_t> projectionWeights;
+    bool hasProjectionBias = false;
+    std::vector<float> projectionBias;
+
+    bool hasInputLayerNormWeights = false;
+    std::vector<float> inputLayerNormWeights;
+    bool hasForgetLayerNormWeights = false;
+    std::vector<float> forgetLayerNormWeights;
+    bool hasCellLayerNormWeights = false;
+    std::vector<float> cellLayerNormWeights;
+    bool hasOutputLayerNormWeights = false;
+    std::vector<float> outputLayerNormWeights;
+
+    std::vector<float> inputValues = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
+                                       0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
+                                       0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
+
+    std::vector<float> expectedOutputValues = { -0.0142517f, -0.0198845f, -0.0120569f, -0.0116868f,
+                                                -0.0350714f, -0.0343202f, -0.047504f, -0.0569789f,
+                                                -0.0146346f, 0.0106663f, -0.0247238f, -0.0319502f,
+                                                -0.0294759f, -0.0129935f, -0.0444175f, -0.0444354f,
+                                                -0.0280855f, 0.00545101f, -0.051422f, -0.0463838f,
+                                                -0.0310702f, 0.00915739f, -0.0625207f, -0.0482648f };
+
+    tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+    float clippingThresCell = 10.f;
+    float clippingThresProj = 0.f;
+    bool isTimeMajor = false;
+
+    UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
+                                               ::tflite::TensorType_INT8,
+                                               batchSize,
+                                               timeSize,
+                                               inputSize,
+                                               outputSize,
+                                               numUnits,
+                                               hasInputToInputWeights,
+                                               inputToInputWeights,
+                                               inputToForgetWeights,
+                                               inputToCellWeights,
+                                               inputToOutputWeights,
+                                               hasRecurrentToInputWeights,
+                                               recurrentToInputWeights,
+                                               recurrentToForgetWeights,
+                                               recurrentToCellWeights,
+                                               recurrentToOutputWeights,
+                                               hasCellToInputWeights,
+                                               cellToInputWeights,
+                                               hasCellToForgetWeights,
+                                               cellToForgetWeights,
+                                               hasCellToOutputWeights,
+                                               cellToOutputWeights,
+                                               hasInputGateBias,
+                                               inputGateBias,
+                                               forgetGateBias,
+                                               cellBias,
+                                               outputGateBias,
+                                               hasProjectionWeights,
+                                               projectionWeights,
+                                               hasProjectionBias,
+                                               projectionBias,
+                                               hasInputLayerNormWeights,
+                                               inputLayerNormWeights,
+                                               hasForgetLayerNormWeights,
+                                               forgetLayerNormWeights,
+                                               hasCellLayerNormWeights,
+                                               cellLayerNormWeights,
+                                               hasOutputLayerNormWeights,
+                                               outputLayerNormWeights,
+                                               inputValues,
+                                               expectedOutputValues,
+                                               activationFunction,
+                                               clippingThresCell,
+                                               clippingThresProj,
+                                               isTimeMajor,
+                                               0.1f);
+}
+
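+// Same weights as the test above, but with isTimeMajor = true the input is
+// interpreted as [timeSize, batchSize, inputSize] rather than
+// [batchSize, timeSize, inputSize], so the expected outputs differ.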
+void UnidirectionalSequenceLstmInt8TimeMajorTest(std::vector<armnn::BackendId>& backends)
+{
+    int32_t batchSize = 3;
+    int32_t timeSize = 2;
+    int32_t inputSize = 3;
+    int32_t outputSize = 4;
+    // cellSize and outputSize have the same size when there is no projection.
+    int32_t numUnits = outputSize;
+
+    // tensorInfo12
+    bool hasInputToInputWeights = true;
+    std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
+
+    std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
+
+    std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
+
+    std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
+
+    // tensorInfo16
+    bool hasRecurrentToInputWeights = true;
+    std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
+
+    std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
+
+    std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
+
+    std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
+
+    // tensorInfo4
+    bool hasCellToInputWeights = false;
+    std::vector<int8_t> cellToInputWeights;
+    bool hasCellToForgetWeights = false;
+    std::vector<int8_t> cellToForgetWeights;
+    bool hasCellToOutputWeights = false;
+    std::vector<int8_t> cellToOutputWeights;
+
+    bool hasInputGateBias = true;
+    std::vector<float> inputGateBias = { 0., 0., 0., 0. };
+    std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
+    std::vector<float> cellBias = { 0., 0., 0., 0. };
+    std::vector<float> outputGateBias = { 0., 0., 0., 0. };
+
+    bool hasProjectionWeights = false;
+    std::vector<int8_t> projectionWeights;
+    bool hasProjectionBias = false;
+    std::vector<float> projectionBias;
+
+    bool hasInputLayerNormWeights = false;
+    std::vector<float> inputLayerNormWeights;
+    bool hasForgetLayerNormWeights = false;
+    std::vector<float> forgetLayerNormWeights;
+    bool hasCellLayerNormWeights = false;
+    std::vector<float> cellLayerNormWeights;
+    bool hasOutputLayerNormWeights = false;
+    std::vector<float> outputLayerNormWeights;
+
+    std::vector<float> inputValues = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
+                                       0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
+                                       0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
+
+    std::vector<float> expectedOutputValues = { -0.0142517f, -0.0198845f, -0.0120122f, -0.0116868f,
+                                                -0.0261295f, -0.0188487f, -0.0345463f, -0.049733f,
+                                                -0.0146346f, 0.0106663f, -0.0247238f, -0.0319502f,
+                                                -0.0291863f, -0.0369402f, -0.0354071f, -0.0296529f,
+                                                -0.0419539f, -0.00617731f, -0.0814796f, -0.0804005f,
+                                                -0.0244737f, 0.0119905f, -0.0457527f, -0.0331862f };
+
+    tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+    float clippingThresCell = 10.f;
+    float clippingThresProj = 0.f;
+    bool isTimeMajor = true;
+
+    UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
+                                               ::tflite::TensorType_INT8,
+                                               batchSize,
+                                               timeSize,
+                                               inputSize,
+                                               outputSize,
+                                               numUnits,
+                                               hasInputToInputWeights,
+                                               inputToInputWeights,
+                                               inputToForgetWeights,
+                                               inputToCellWeights,
+                                               inputToOutputWeights,
+                                               hasRecurrentToInputWeights,
+                                               recurrentToInputWeights,
+                                               recurrentToForgetWeights,
+                                               recurrentToCellWeights,
+                                               recurrentToOutputWeights,
+                                               hasCellToInputWeights,
+                                               cellToInputWeights,
+                                               hasCellToForgetWeights,
+                                               cellToForgetWeights,
+                                               hasCellToOutputWeights,
+                                               cellToOutputWeights,
+                                               hasInputGateBias,
+                                               inputGateBias,
+                                               forgetGateBias,
+                                               cellBias,
+                                               outputGateBias,
+                                               hasProjectionWeights,
+                                               projectionWeights,
+                                               hasProjectionBias,
+                                               projectionBias,
+                                               hasInputLayerNormWeights,
+                                               inputLayerNormWeights,
+                                               hasForgetLayerNormWeights,
+                                               forgetLayerNormWeights,
+                                               hasCellLayerNormWeights,
+                                               cellLayerNormWeights,
+                                               hasOutputLayerNormWeights,
+                                               outputLayerNormWeights,
+                                               inputValues,
+                                               expectedOutputValues,
+                                               activationFunction,
+                                               clippingThresCell,
+                                               clippingThresProj,
+                                               isTimeMajor,
+                                               0.1f);
+}
+
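+// Peephole connections let each gate observe the cell state directly: the
+// cellTo*Weights below are elementwise weights on the cell state that are
+// added into the corresponding gate's pre-activation.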
+void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest(std::vector<armnn::BackendId>& backends)
+{
+    int32_t batchSize = 3;
+    int32_t timeSize = 2;
+    int32_t inputSize = 3;
+    int32_t outputSize = 4;
+    int32_t numUnits = 4;
+
+    bool hasInputToInputWeights = true;
+    std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
+
+    std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
+
+    std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
+
+    std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
+
+    // tensorInfo16
+    bool hasRecurrentToInputWeights = true;
+    std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
+
+    std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
+
+    std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
+
+    std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
+
+    // tensorInfo4
+    bool hasCellToInputWeights = true;
+    std::vector<int8_t> cellToInputWeights = { 5, 10, 25, 15 };
+    bool hasCellToForgetWeights = true;
+    std::vector<int8_t> cellToForgetWeights = { -5, 15, 25, 3 };
+    bool hasCellToOutputWeights = true;
+    std::vector<int8_t> cellToOutputWeights = { 10, -10, -5, 50 };
+
+    bool hasInputGateBias = true;
+    std::vector<float> inputGateBias = { 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f };
+    std::vector<float> forgetGateBias = { 0.035185695f, -0.042891346f, -0.3032477f, 0.23027696f };
+    std::vector<float> cellBias = { -0.124379363f, 0.55531194f, 0.23377132f, 0.033463873f };
+    std::vector<float> outputGateBias = { 0.046159424f, -0.12809046f, 0.03563469f, 0.12648113f };
+
+    bool hasProjectionWeights = true;
+    std::vector<int8_t> projectionWeights = { -25, 51, 3, -5, 25, 127, 77, 20, 18, 51, -10, 51, -25, 88, 77, -13 };
+    bool hasProjectionBias = true;
+    std::vector<float> projectionBias(outputSize, 0.f);
+
+    bool hasInputLayerNormWeights = false;
+    std::vector<float> inputLayerNormWeights;
+    bool hasForgetLayerNormWeights = false;
+    std::vector<float> forgetLayerNormWeights;
+    bool hasCellLayerNormWeights = false;
+    std::vector<float> cellLayerNormWeights;
+    bool hasOutputLayerNormWeights = false;
+    std::vector<float> outputLayerNormWeights;
+
+    std::vector<float> inputValues = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
+                                       0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
+                                       0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
+
+    std::vector<float> expectedOutputValues = { 0.612103f, 1.56788f, 0.31966f, 1.42956f,
+                                                0.909718f, 3.07916f, -0.560586f, 3.8907f,
+                                                0.753671f, 1.77485f, 0.365122f, 1.60077f,
+                                                0.812644f, 2.79092f, -0.605396f, 3.61742f,
+                                                0.791857f, 1.64353f, 0.316588f, 1.55192f,
+                                                0.807265f, 2.47012f, -0.539598f, 3.25654f };
+
+    tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+    float clippingThresCell = 10.f;
+    float clippingThresProj = 0.f;
+    bool isTimeMajor = false;
+
+    UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
+                                               ::tflite::TensorType_INT8,
+                                               batchSize,
+                                               timeSize,
+                                               inputSize,
+                                               outputSize,
+                                               numUnits,
+                                               hasInputToInputWeights,
+                                               inputToInputWeights,
+                                               inputToForgetWeights,
+                                               inputToCellWeights,
+                                               inputToOutputWeights,
+                                               hasRecurrentToInputWeights,
+                                               recurrentToInputWeights,
+                                               recurrentToForgetWeights,
+                                               recurrentToCellWeights,
+                                               recurrentToOutputWeights,
+                                               hasCellToInputWeights,
+                                               cellToInputWeights,
+                                               hasCellToForgetWeights,
+                                               cellToForgetWeights,
+                                               hasCellToOutputWeights,
+                                               cellToOutputWeights,
+                                               hasInputGateBias,
+                                               inputGateBias,
+                                               forgetGateBias,
+                                               cellBias,
+                                               outputGateBias,
+                                               hasProjectionWeights,
+                                               projectionWeights,
+                                               hasProjectionBias,
+                                               projectionBias,
+                                               hasInputLayerNormWeights,
+                                               inputLayerNormWeights,
+                                               hasForgetLayerNormWeights,
+                                               forgetLayerNormWeights,
+                                               hasCellLayerNormWeights,
+                                               cellLayerNormWeights,
+                                               hasOutputLayerNormWeights,
+                                               outputLayerNormWeights,
+                                               inputValues,
+                                               expectedOutputValues,
+                                               activationFunction,
+                                               clippingThresCell,
+                                               clippingThresProj,
+                                               isTimeMajor,
+                                               0.1f);
+}
+
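+// CIFG combined with peepholes: the input gate (and with it
+// cellToInputWeights) is absent, while the forget and output gates keep
+// their peephole weights.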
+void UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(std::vector<armnn::BackendId>& backends)
+{
+    int32_t batchSize = 3;
+    int32_t timeSize = 2;
+    int32_t inputSize = 3;
+    int32_t outputSize = 4;
+    // cellSize and outputSize have the same size when there is no projection.
+    int32_t numUnits = outputSize;
+
+    // tensorInfo12
+    bool hasInputToInputWeights = false;
+    std::vector<int8_t> inputToInputWeights;
+
+    std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
+
+    std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
+
+    std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
+
+    // tensorInfo16
+    bool hasRecurrentToInputWeights = false;
+    std::vector<int8_t> recurrentToInputWeights;
+    std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
+
+    std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
+
+    std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
+
+    // tensorInfo4
+    bool hasCellToInputWeights = false;
+    std::vector<int8_t> cellToInputWeights;
+    bool hasCellToForgetWeights = true;
+    std::vector<int8_t> cellToForgetWeights = { 47, -52, -24, 31 };
+    bool hasCellToOutputWeights = true;
+    std::vector<int8_t> cellToOutputWeights = { -17, 82, 85, -77 };
+
+    bool hasInputGateBias = false;
+    std::vector<float> inputGateBias;
+    std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
+    std::vector<float> cellBias = { 0., 0., 0., 0. };
+    std::vector<float> outputGateBias = { 0., 0., 0., 0. };
+
+    bool hasProjectionWeights = false;
+    std::vector<int8_t> projectionWeights;
+    bool hasProjectionBias = false;
+    std::vector<float> projectionBias;
+
+    bool hasInputLayerNormWeights = false;
+    std::vector<float> inputLayerNormWeights;
+    bool hasForgetLayerNormWeights = false;
+    std::vector<float> forgetLayerNormWeights;
+    bool hasCellLayerNormWeights = false;
+    std::vector<float> cellLayerNormWeights;
+    bool hasOutputLayerNormWeights = false;
+    std::vector<float> outputLayerNormWeights;
+
+    std::vector<float> inputValues = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
+                                       0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
+                                       0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
+
+    std::vector<float> expectedOutputValues = { -0.0072104f, -0.00991171f, -0.00650478f, -0.00713055f,
+                                                -0.0191782f, -0.0161269f, -0.0233683f, -0.054299f,
+                                                -0.00783725f, 0.00635271f, -0.0126718f, -0.022613f,
+                                                -0.0161351f, -0.00775868f, -0.021054f, -0.0339778f,
+                                                -0.0146392f, 0.00330261f, -0.0258733f, -0.0407797f,
+                                                -0.0174297f, 0.0050105f, -0.0266275f, -0.0362564f };
+
+    tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+    float clippingThresCell = 10.f;
+    float clippingThresProj = 0.f;
+    bool isTimeMajor = false;
+
+    UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
+                                               ::tflite::TensorType_INT8,
+                                               batchSize,
+                                               timeSize,
+                                               inputSize,
+                                               outputSize,
+                                               numUnits,
+                                               hasInputToInputWeights,
+                                               inputToInputWeights,
+                                               inputToForgetWeights,
+                                               inputToCellWeights,
+                                               inputToOutputWeights,
+                                               hasRecurrentToInputWeights,
+                                               recurrentToInputWeights,
+                                               recurrentToForgetWeights,
+                                               recurrentToCellWeights,
+                                               recurrentToOutputWeights,
+                                               hasCellToInputWeights,
+                                               cellToInputWeights,
+                                               hasCellToForgetWeights,
+                                               cellToForgetWeights,
+                                               hasCellToOutputWeights,
+                                               cellToOutputWeights,
+                                               hasInputGateBias,
+                                               inputGateBias,
+                                               forgetGateBias,
+                                               cellBias,
+                                               outputGateBias,
+                                               hasProjectionWeights,
+                                               projectionWeights,
+                                               hasProjectionBias,
+                                               projectionBias,
+                                               hasInputLayerNormWeights,
+                                               inputLayerNormWeights,
+                                               hasForgetLayerNormWeights,
+                                               forgetLayerNormWeights,
+                                               hasCellLayerNormWeights,
+                                               cellLayerNormWeights,
+                                               hasOutputLayerNormWeights,
+                                               outputLayerNormWeights,
+                                               inputValues,
+                                               expectedOutputValues,
+                                               activationFunction,
+                                               clippingThresCell,
+                                               clippingThresProj,
+                                               isTimeMajor,
+                                               0.1f);
+}
+
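+// The most fully featured int8 variant in this suite: peepholes, projection
+// and per-gate layer normalisation enabled together, with numUnits (5)
+// larger than outputSize (4).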
+void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest(
+    std::vector<armnn::BackendId>& backends)
+{
+    int32_t batchSize = 3;
+    int32_t timeSize = 2;
+    int32_t inputSize = 3;
+    int32_t outputSize = 4;
+    int32_t numUnits = 5;
+
+    bool hasInputToInputWeights = true;
+    std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3, 2, 2, -4 };
+
+    std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1, -3, -2, -4 };
+
+    std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3, 2, 5, -4 };
+
+    std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4, -4, -1, -1 };
+
+    bool hasRecurrentToInputWeights = true;
+    std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3,
+                                                    5, -1, 1, 3, -1, -1, -1, 4, 2, 3 };
+
+    std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3,
+                                                     5, -1, 1, 3, -2, -1, -1, 2, 2, 1 };
+
+    std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2,
+                                                   1, 2, 3, -2, 3, -3, -1, -5, 1, 3 };
+
+    std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3,
+                                                     -4, -1, -1, -1, 2, -1, 5, 1, -3, -4 };
+
+    // tensorInfo5
+    bool hasCellToInputWeights = true;
+    std::vector<int8_t> cellToInputWeights = { 5, 3, 8, -5, 2 };
+    bool hasCellToForgetWeights = true;
+    std::vector<int8_t> cellToForgetWeights = { -2, -7, 5, -3, 4 };
+    bool hasCellToOutputWeights = true;
+    std::vector<int8_t> cellToOutputWeights = { 9, -10, -5, 5, 1 };
+
+    bool hasInputGateBias = true;
+    std::vector<float> inputGateBias = { 0.03f, 0.15f, 0.22f, 0.38f, 0.05f };
+    std::vector<float> forgetGateBias = { 0.1f, -0.3f, -0.2f, 0.1f, 0.4f };
+    std::vector<float> cellBias = { -0.05f, 0.72f, 0.25f, 0.08f, 0.1f };
+    std::vector<float> outputGateBias = { 0.05f, -0.01f, 0.2f, 0.1f, -0.2f };
+
+    bool hasProjectionWeights = true;
+    std::vector<int8_t> projectionWeights = { -1, 2, 1, -2, 1, 5, 3, 8, 7, 2,
+                                              -4, 2, 5, -4, 3, -2, 3, 8, -7, 2 };
+    bool hasProjectionBias = true;
+    std::vector<float> projectionBias(outputSize, 0.f);
+
+    bool hasInputLayerNormWeights = true;
+    std::vector<float> inputLayerNormWeights = { 0.1f, 0.2f, -0.3f, -0.1f, 0.5f };
+    bool hasForgetLayerNormWeights = true;
+    std::vector<float> forgetLayerNormWeights = { -0.1f, 0.2f, 0.3f, 0.5f, 0.2f };
+    bool hasCellLayerNormWeights = true;
+    std::vector<float> cellLayerNormWeights = { 0.5f, 0.2f, 0.3f, 0.4f, -0.5f };
+    bool hasOutputLayerNormWeights = true;
+    std::vector<float> outputLayerNormWeights = { 0.6f, -0.2f, -0.2f, 0.5f, 0.1f };
+
+    std::vector<float> inputValues = { 1.0f, 8.0f, 3.0f, 4.0f, 5.0f, 4.0f,
+                                       3.0f, 2.0f, 1.0f, 2.0f, 3.0f, 4.0f,
+                                       5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 2.0f };
+
+    std::vector<float> expectedOutputValues = { 0.0471276f, 0.0168155f, 0.0789885f, 0.16550f,
+                                                0.0643133f, -0.0400722f, 0.100593f, 0.197722f,
+                                                0.0465562f, -0.0600682f, 0.0622087f, 0.115053f,
+                                                0.056287f, -0.0566218f, 0.0856832f, 0.148484f,
+                                                0.0457859f, -0.0588112f, 0.0623636f, 0.114333f,
+                                                0.0509271f, -0.0754262f, 0.058600f, 0.0801288f };
+
+    tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
+    float clippingThresCell = 10.f;
+    float clippingThresProj = 0.f;
+    bool isTimeMajor = false;
+
+    UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
+                                               ::tflite::TensorType_INT8,
+                                               batchSize,
+                                               timeSize,
+                                               inputSize,
+                                               outputSize,
+                                               numUnits,
+                                               hasInputToInputWeights,
+                                               inputToInputWeights,
+                                               inputToForgetWeights,
+                                               inputToCellWeights,
+                                               inputToOutputWeights,
+                                               hasRecurrentToInputWeights,
+                                               recurrentToInputWeights,
+                                               recurrentToForgetWeights,
+                                               recurrentToCellWeights,
+                                               recurrentToOutputWeights,
+                                               hasCellToInputWeights,
+                                               cellToInputWeights,
+                                               hasCellToForgetWeights,
+                                               cellToForgetWeights,
+                                               hasCellToOutputWeights,
+                                               cellToOutputWeights,
+                                               hasInputGateBias,
+                                               inputGateBias,
+                                               forgetGateBias,
+                                               cellBias,
+                                               outputGateBias,
+                                               hasProjectionWeights,
+                                               projectionWeights,
+                                               hasProjectionBias,
+                                               projectionBias,
+                                               hasInputLayerNormWeights,
+                                               inputLayerNormWeights,
+                                               hasForgetLayerNormWeights,
+                                               forgetLayerNormWeights,
+                                               hasCellLayerNormWeights,
+                                               cellLayerNormWeights,
+                                               hasOutputLayerNormWeights,
+                                               outputLayerNormWeights,
+                                               inputValues,
+                                               expectedOutputValues,
+                                               activationFunction,
+                                               clippingThresCell,
+                                               clippingThresProj,
+                                               isTimeMajor,
+                                               0.1f);
+}
+
+TEST_SUITE("UnidirectionalSequenceLstmTest_CpuRefTests")
+{
+
+TEST_CASE ("UnidirectionalSequenceLstmTest_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    UnidirectionalSequenceLstmTest(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmTimeMajorTest_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    UnidirectionalSequenceLstmTimeMajorTest(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmInt8Test_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    UnidirectionalSequenceLstmInt8Test(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmTimeInt8TimeMajorTest_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    UnidirectionalSequenceLstmInt8TimeMajorTest(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(backends);
+}
+
+TEST_CASE ("UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest_CpuRef_Test")
+{
+    std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest(backends);
+}
+
+} // End of TEST_SUITE("UnidirectionalSequenceLstmTest_CpuRefTests")
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp b/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp
new file mode 100644
index 0000000..0ff04e7
--- /dev/null
+++ b/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp
@@ -0,0 +1,742 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+#include <tensorflow/lite/c/common.h>
+
+#include <doctest/doctest.h>
+
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <armnn/Types.hpp>
+
+#include <initializer_list>
+#include <iterator>
+#include <vector>
+
+namespace
+{
+
+template<typename T>
+std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType tensorType,
+                                                              int32_t batchSize,
+                                                              int32_t timeSize,
+                                                              int32_t inputSize,
+                                                              int32_t outputSize,
+                                                              int32_t numUnits,
+                                                              bool hasInputToInputWeights,
+                                                              const std::vector<T>& inputToInputWeights,
+                                                              const std::vector<T>& inputToForgetWeights,
+                                                              const std::vector<T>& inputToCellWeights,
+                                                              const std::vector<T>& inputToOutputWeights,
+                                                              bool hasRecurrentToInputWeights,
+                                                              const std::vector<T>& recurrentToInputWeights,
+                                                              const std::vector<T>& recurrentToForgetWeights,
+                                                              const std::vector<T>& recurrentToCellWeights,
+                                                              const std::vector<T>& recurrentToOutputWeights,
+                                                              bool hasCellToInputWeights,
+                                                              const std::vector<T>& cellToInputWeights,
+                                                              bool hasCellToForgetWeights,
+                                                              const std::vector<T>& cellToForgetWeights,
+                                                              bool hasCellToOutputWeights,
+                                                              const std::vector<T>& cellToOutputWeights,
+                                                              bool hasInputGateBias,
+                                                              const std::vector<float>& inputGateBias,
+                                                              const std::vector<float>& forgetGateBias,
+                                                              const std::vector<float>& cellBias,
+                                                              const std::vector<float>& outputGateBias,
+                                                              bool hasProjectionWeights,
+                                                              const std::vector<T>& projectionWeights,
+                                                              bool hasProjectionBias,
+                                                              const std::vector<float>& projectionBias,
+                                                              bool hasInputLayerNormWeights,
+                                                              const std::vector<float>& inputLayerNormWeights,
+                                                              bool hasForgetLayerNormWeights,
+                                                              const std::vector<float>& forgetLayerNormWeights,
+                                                              bool hasCellLayerNormWeights,
+                                                              const std::vector<float>& cellLayerNormWeights,
+                                                              bool hasOutputLayerNormWeights,
+                                                              const std::vector<float>& outputLayerNormWeights,
+                                                              tflite::ActivationFunctionType activationFunction,
+                                                              float clippingThresCell,
+                                                              float clippingThresProj,
+                                                              bool isTimeMajor,
+                                                              float quantScale,
+                                                              int quantOffset = 0)
+{
+
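+    // Shape vectors for the constant tensors created below.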
+    std::vector<int32_t> tensorInfo0{};
+    std::vector<int32_t> tensorInfoNumUnits{numUnits};
+    std::vector<int32_t> tensorInfoInputSize{numUnits, inputSize};
+    std::vector<int32_t> tensorInfoOutputSize{numUnits, outputSize};
+
+    std::vector<int32_t> inputShape;
+    std::vector<int32_t> outputShape;
+    if (isTimeMajor)
+    {
+        inputShape  = {timeSize, batchSize, inputSize};
+        outputShape = {timeSize, batchSize, outputSize};
+    }
+    else
+    {
+        inputShape  = {batchSize, timeSize, inputSize};
+        outputShape = {batchSize, timeSize, outputSize};
+    }
+    std::vector<int32_t> outputStateInDimensions{batchSize, outputSize};
+    std::vector<int32_t> cellStateInDimensions{batchSize, numUnits};
+    std::vector<int32_t> projectionWeightDimensions{outputSize, numUnits};
+    std::vector<int32_t> projectionBiasDimensions{outputSize};
+
+    std::vector<int> operatorInputs;
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder                   flatBufferBuilder;
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    std::vector<flatbuffers::Offset<Tensor>>         tensors;
+
+    auto quantizationParameters =
+             CreateQuantizationParameters(flatBufferBuilder,
+                                          0,
+                                          0,
+                                          flatBufferBuilder.CreateVector<float>({1.0f}),
+                                          flatBufferBuilder.CreateVector<int64_t>({0}));
+
+    auto weightQuantizationParameters =
+             CreateQuantizationParameters(flatBufferBuilder,
+                                          0,
+                                          0,
+                                          flatBufferBuilder.CreateVector<float>({quantScale}),
+                                          flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
+
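+    // Buffer 0 is the empty sentinel buffer required by the TFLite schema;
+    // a second empty buffer backs the runtime-filled input tensor.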
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
+                                                                           inputShape.size()),
+                                   ::tflite::TensorType_FLOAT32,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("input_0")));
+    operatorInputs.push_back(tensors.size() - 1);
+
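+    // Optional tensors that are absent (e.g. the input gate tensors when CIFG is enabled)
+    // are signalled by pushing kTfLiteOptionalTensor instead of a tensor index.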
+    if (hasInputToInputWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(inputToInputWeights.data()),
+                             sizeof(T) * inputToInputWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
+                                                                               tensorInfoInputSize.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("inputToInputWeights"),
+                                       weightQuantizationParameters));
+        operatorInputs.push_back(tensors.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(inputToForgetWeights.data()),
+                         sizeof(T) * inputToForgetWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
+                                                                           tensorInfoInputSize.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("inputToForgetWeights"),
+                                   weightQuantizationParameters));
+    operatorInputs.push_back(tensors.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(inputToCellWeights.data()),
+                         sizeof(T) * inputToCellWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
+                                                                           tensorInfoInputSize.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("inputToCellWeights"),
+                                   weightQuantizationParameters));
+    operatorInputs.push_back(tensors.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(inputToOutputWeights.data()),
+                         sizeof(T) * inputToOutputWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
+                                                                           tensorInfoInputSize.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("inputToOutputWeights"),
+                                   weightQuantizationParameters));
+    operatorInputs.push_back(tensors.size() - 1);
+
+    if (hasRecurrentToInputWeights)
+    {
+        buffers.push_back(CreateBuffer(
+            flatBufferBuilder,
+            flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(recurrentToInputWeights.data()),
+                                           sizeof(T) * recurrentToInputWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
+                                                                               tensorInfoOutputSize.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("recurrentToInputWeights"),
+                                       weightQuantizationParameters));
+        operatorInputs.push_back(tensors.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                        recurrentToForgetWeights.data()),
+                                                    sizeof(T) * recurrentToForgetWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
+                                                                           tensorInfoOutputSize.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("recurrentToForgetWeights"),
+                                   weightQuantizationParameters));
+    operatorInputs.push_back(tensors.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                        recurrentToCellWeights.data()),
+                                                    sizeof(T) * recurrentToCellWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
+                                                                           tensorInfoOutputSize.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("recurrentToCellWeights"),
+                                   weightQuantizationParameters));
+    operatorInputs.push_back(tensors.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                        recurrentToOutputWeights.data()),
+                                                    sizeof(T) * recurrentToOutputWeights.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
+                                                                           tensorInfoOutputSize.size()),
+                                   tensorType,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("recurrentToOutputWeights"),
+                                   weightQuantizationParameters));
+    operatorInputs.push_back(tensors.size() - 1);
+
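+    // Optional peephole connections: one [numUnits] weight vector per gate.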
+    if (hasCellToInputWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                            cellToInputWeights.data()),
+                                                        sizeof(T) * cellToInputWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+                                                                               tensorInfoNumUnits.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("cellToInputWeights"),
+                                       weightQuantizationParameters));
+        operatorInputs.push_back(tensors.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasCellToForgetWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                            cellToForgetWeights.data()),
+                                                        sizeof(T) * cellToForgetWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+                                                                               tensorInfoNumUnits.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("cellToForgetWeights"),
+                                       weightQuantizationParameters));
+        operatorInputs.push_back(tensors.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasCellToOutputWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                            cellToOutputWeights.data()),
+                                                        sizeof(T) * cellToOutputWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+                                                                               tensorInfoNumUnits.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("cellToOutputWeights"),
+                                       weightQuantizationParameters));
+        operatorInputs.push_back(tensors.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
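+    // Gate biases are always FLOAT32 tensors, even when the weights are quantized.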
+    if (hasInputGateBias)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputGateBias.data()),
+                                                        sizeof(float) * inputGateBias.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+                                                                               tensorInfoNumUnits.size()),
+                                       ::tflite::TensorType_FLOAT32,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("inputGateBias")));
+        operatorInputs.push_back(tensors.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(forgetGateBias.data()),
+                                                    sizeof(float) * forgetGateBias.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+                                                                           tensorInfoNumUnits.size()),
+                                   ::tflite::TensorType_FLOAT32,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("forgetGateBias")));
+    operatorInputs.push_back(tensors.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellBias.data()),
+                                                    sizeof(float) * cellBias.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+                                                                           tensorInfoNumUnits.size()),
+                                   ::tflite::TensorType_FLOAT32,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("cellBias")));
+    operatorInputs.push_back(tensors.size() - 1);
+
+    buffers.push_back(
+        CreateBuffer(flatBufferBuilder,
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(outputGateBias.data()),
+                                                    sizeof(float) * outputGateBias.size())));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+                                                                           tensorInfoNumUnits.size()),
+                                   ::tflite::TensorType_FLOAT32,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("outputGateBias")));
+    operatorInputs.push_back(tensors.size() - 1);
+
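+    // Optional projection layer mapping the [numUnits] cell output down to [outputSize].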
+    if (hasProjectionWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(projectionWeights.data()),
+                             sizeof(T) * projectionWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(projectionWeightDimensions.data(),
+                                                                               projectionWeightDimensions.size()),
+                                       tensorType,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("projectionWeights"),
+                                       weightQuantizationParameters));
+        operatorInputs.push_back(tensors.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasProjectionBias)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(projectionBias.data()),
+                             sizeof(float) * projectionBias.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(projectionBiasDimensions.data(),
+                                                                               projectionBiasDimensions.size()),
+                                       ::tflite::TensorType_FLOAT32,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("projectionBias")));
+        operatorInputs.push_back(tensors.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
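+    // The output-state and cell-state inputs are created as variable tensors
+    // (last argument 'true') so the runtime treats them as persistent state.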
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(outputStateInDimensions.data(),
+                                                                           outputStateInDimensions.size()),
+                                   ::tflite::TensorType_FLOAT32,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("outputStateInInfo"),
+                                   quantizationParameters,
+                                   true));
+    operatorInputs.push_back(tensors.size() - 1);
+
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(cellStateInDimensions.data(),
+                                                                           cellStateInDimensions.size()),
+                                   ::tflite::TensorType_FLOAT32,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("cellStateInInfo"),
+                                   quantizationParameters,
+                                   true));
+    operatorInputs.push_back(tensors.size() - 1);
+
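+    // Optional layer-normalisation weights: one [numUnits] vector per gate.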
+    if (hasInputLayerNormWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(inputLayerNormWeights.data()),
+                             sizeof(float) * inputLayerNormWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+                                                                               tensorInfoNumUnits.size()),
+                                       ::tflite::TensorType_FLOAT32,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("inputLayerNormWeights")));
+        operatorInputs.push_back(tensors.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasForgetLayerNormWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(forgetLayerNormWeights.data()),
+                             sizeof(float) * forgetLayerNormWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+                                                                               tensorInfoNumUnits.size()),
+                                       ::tflite::TensorType_FLOAT32,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("forgetLayerNormWeights")));
+        operatorInputs.push_back(tensors.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasCellLayerNormWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                            cellLayerNormWeights.data()),
+                                                        sizeof(float) * cellLayerNormWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+                                                                               tensorInfoNumUnits.size()),
+                                       ::tflite::TensorType_FLOAT32,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("cellLayerNormWeights")));
+        operatorInputs.push_back(tensors.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
+
+    if (hasOutputLayerNormWeights)
+    {
+        buffers.push_back(
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(outputLayerNormWeights.data()),
+                             sizeof(float) * outputLayerNormWeights.size())));
+        tensors.push_back(CreateTensor(flatBufferBuilder,
+                                       flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
+                                                                               tensorInfoNumUnits.size()),
+                                       ::tflite::TensorType_FLOAT32,
+                                       buffers.size() - 1,
+                                       flatBufferBuilder.CreateString("outputLayerNormWeights")));
+        operatorInputs.push_back(tensors.size() - 1);
+    }
+    else
+    {
+        operatorInputs.push_back(kTfLiteOptionalTensor);
+    }
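+
+    // Network output tensor.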
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    tensors.push_back(CreateTensor(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
+                                                                           outputShape.size()),
+                                   ::tflite::TensorType_FLOAT32,
+                                   buffers.size() - 1,
+                                   flatBufferBuilder.CreateString("output")));
+    std::vector<int> operatorOutputs;
+    operatorOutputs.push_back(tensors.size() - 1);
+
+    // create operator
+    tflite::BuiltinOptions    operatorBuiltinOptionsType = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions     =
+                                  CreateUnidirectionalSequenceLSTMOptions(flatBufferBuilder,
+                                                                          activationFunction,
+                                                                          clippingThresCell,
+                                                                          clippingThresProj,
+                                                                          isTimeMajor).Union();
+
+    flatbuffers::Offset<Operator> lstmOperator =
+                                      CreateOperator(flatBufferBuilder,
+                                                     0,
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
+                                                                                             operatorInputs.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+                                                                                             operatorOutputs.size()),
+                                                     operatorBuiltinOptionsType, operatorBuiltinOptions);
+
+    flatbuffers::Offset<SubGraph> subgraph =
+                                      CreateSubGraph(flatBufferBuilder,
+                                                     flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
+                                                                                             operatorInputs.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+                                                                                             operatorOutputs.size()),
+                                                     flatBufferBuilder.CreateVector(&lstmOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+                                                 flatBufferBuilder.CreateString(
+                                                     "ArmnnDelegate: UnidirectionalSequenceLSTM Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode =
+                                                 CreateOperatorCode(flatBufferBuilder,
+                                                 tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+                                   CreateModel(flatBufferBuilder,
+                                               TFLITE_SCHEMA_VERSION,
+                                               flatBufferBuilder.CreateVector(&operatorCode, 1),
+                                               flatBufferBuilder.CreateVector(&subgraph, 1),
+                                               modelDescription,
+                                               flatBufferBuilder.CreateVector(buffers));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template<typename T>
+void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
+                                        tflite::TensorType tensorType,
+                                        int32_t batchSize,
+                                        int32_t timeSize,
+                                        int32_t inputSize,
+                                        int32_t outputSize,
+                                        int32_t numUnits,
+                                        bool hasInputToInputWeights,
+                                        const std::vector<T>& inputToInputWeights,
+                                        const std::vector<T>& inputToForgetWeights,
+                                        const std::vector<T>& inputToCellWeights,
+                                        const std::vector<T>& inputToOutputWeights,
+                                        bool hasRecurrentToInputWeights,
+                                        const std::vector<T>& recurrentToInputWeights,
+                                        const std::vector<T>& recurrentToForgetWeights,
+                                        const std::vector<T>& recurrentToCellWeights,
+                                        const std::vector<T>& recurrentToOutputWeights,
+                                        bool hasCellToInputWeights,
+                                        const std::vector<T>& cellToInputWeights,
+                                        bool hasCellToForgetWeights,
+                                        const std::vector<T>& cellToForgetWeights,
+                                        bool hasCellToOutputWeights,
+                                        const std::vector<T>& cellToOutputWeights,
+                                        bool hasInputGateBias,
+                                        const std::vector<float>& inputGateBias,
+                                        const std::vector<float>& forgetGateBias,
+                                        const std::vector<float>& cellBias,
+                                        const std::vector<float>& outputGateBias,
+                                        bool hasProjectionWeights,
+                                        const std::vector<T>& projectionWeights,
+                                        bool hasProjectionBias,
+                                        const std::vector<float>& projectionBias,
+                                        bool hasInputLayerNormWeights,
+                                        const std::vector<float>& inputLayerNormWeights,
+                                        bool hasForgetLayerNormWeights,
+                                        const std::vector<float>& forgetLayerNormWeights,
+                                        bool hasCellLayerNormWeights,
+                                        const std::vector<float>& cellLayerNormWeights,
+                                        bool hasOutputLayerNormWeights,
+                                        const std::vector<float>& outputLayerNormWeights,
+                                        std::vector<float>& inputValues,
+                                        std::vector<float>& expectedOutputValues,
+                                        tflite::ActivationFunctionType activationFunction,
+                                        float clippingThresCell,
+                                        float clippingThresProj,
+                                        bool isTimeMajor,
+                                        float quantScale = 0.1f)
+{
+    using namespace tflite;
+
+    std::vector<char> modelBuffer = CreateUnidirectionalSequenceLstmTfLiteModel(tensorType,
+                                                                                batchSize,
+                                                                                timeSize,
+                                                                                inputSize,
+                                                                                outputSize,
+                                                                                numUnits,
+                                                                                hasInputToInputWeights,
+                                                                                inputToInputWeights,
+                                                                                inputToForgetWeights,
+                                                                                inputToCellWeights,
+                                                                                inputToOutputWeights,
+                                                                                hasRecurrentToInputWeights,
+                                                                                recurrentToInputWeights,
+                                                                                recurrentToForgetWeights,
+                                                                                recurrentToCellWeights,
+                                                                                recurrentToOutputWeights,
+                                                                                hasCellToInputWeights,
+                                                                                cellToInputWeights,
+                                                                                hasCellToForgetWeights,
+                                                                                cellToForgetWeights,
+                                                                                hasCellToOutputWeights,
+                                                                                cellToOutputWeights,
+                                                                                hasInputGateBias,
+                                                                                inputGateBias,
+                                                                                forgetGateBias,
+                                                                                cellBias,
+                                                                                outputGateBias,
+                                                                                hasProjectionWeights,
+                                                                                projectionWeights,
+                                                                                hasProjectionBias,
+                                                                                projectionBias,
+                                                                                hasInputLayerNormWeights,
+                                                                                inputLayerNormWeights,
+                                                                                hasForgetLayerNormWeights,
+                                                                                forgetLayerNormWeights,
+                                                                                hasCellLayerNormWeights,
+                                                                                cellLayerNormWeights,
+                                                                                hasOutputLayerNormWeights,
+                                                                                outputLayerNormWeights,
+                                                                                activationFunction,
+                                                                                clippingThresCell,
+                                                                                clippingThresProj,
+                                                                                isTimeMajor,
+                                                                                quantScale);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Create two TfLite Interpreters: one to run through the Arm NN delegate and one on the
+    // reference TfLite kernels, so their outputs can be compared.
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+                                   theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                                                    armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateInputId   = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        tfLiteDelegateInputData[i] = inputValues[i];
+    }
+
+    auto armnnDelegateInputId   = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
+    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    {
+        armnnDelegateInputData[i] = inputValues[i];
+    }
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    auto tfLiteDelegateOutputId   = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
+    auto armnnDelegateOutputId    = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData  = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+
+    if (tensorType == ::tflite::TensorType_INT8)
+    {
+        // Allow 2% tolerance for quantized weights
+        armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
+                                   expectedOutputValues.size(), 2);
+        armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData,
+                                   expectedOutputValues.size(), 2);
+        armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData,
+                                   expectedOutputValues.size(), 2);
+    }
+    else
+    {
+        armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
+                                   expectedOutputValues.size());
+        armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelegateOutputData,
+                                   expectedOutputValues.size());
+        armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+    }
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/UnpackTest.cpp b/delegate/test/UnpackTest.cpp
new file mode 100644
index 0000000..7c4b12a
--- /dev/null
+++ b/delegate/test/UnpackTest.cpp
@@ -0,0 +1,179 @@
+//
+// Copyright © 2021,2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "UnpackTestHelper.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <schema_generated.h>
+
+#include <doctest/doctest.h>
+
+namespace armnnDelegate
+{
+
+template <typename T>
+void UnpackAxis0Num4Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+{
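+    // Unpacking a [4, 1, 6] tensor along axis 0 yields four [1, 6] tensors.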
+    std::vector<int32_t> inputShape { 4, 1, 6 };
+    std::vector<int32_t> expectedOutputShape { 1, 6 };
+
+    std::vector<T> inputValues { 1, 2, 3, 4, 5, 6,
+                                 7, 8, 9, 10, 11, 12,
+                                 13, 14, 15, 16, 17, 18,
+                                 19, 20, 21, 22, 23, 24 };
+
+    std::vector<T> expectedOutputValues0 { 1, 2, 3, 4, 5, 6 };
+    std::vector<T> expectedOutputValues1 { 7, 8, 9, 10, 11, 12 };
+    std::vector<T> expectedOutputValues2 { 13, 14, 15, 16, 17, 18 };
+    std::vector<T> expectedOutputValues3 { 19, 20, 21, 22, 23, 24 };
+
+    std::vector<std::vector<T>> expectedOutputValues{ expectedOutputValues0,
+                                                      expectedOutputValues1,
+                                                      expectedOutputValues2,
+                                                      expectedOutputValues3 };
+
+    UnpackTest<T>(tflite::BuiltinOperator_UNPACK,
+                  tensorType,
+                  backends,
+                  inputShape,
+                  expectedOutputShape,
+                  inputValues,
+                  expectedOutputValues,
+                  0);
+}
+
+template <typename T>
+void UnpackAxis2Num6Test(tflite::TensorType tensorType, std::vector<armnn::BackendId>& backends)
+{
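+    // Unpacking a [4, 1, 6] tensor along axis 2 yields six [4, 1] tensors.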
+    std::vector<int32_t> inputShape { 4, 1, 6 };
+    std::vector<int32_t> expectedOutputShape { 4, 1 };
+
+    std::vector<T> inputValues { 1, 2, 3, 4, 5, 6,
+                                 7, 8, 9, 10, 11, 12,
+                                 13, 14, 15, 16, 17, 18,
+                                 19, 20, 21, 22, 23, 24 };
+
+    std::vector<T> expectedOutputValues0 { 1, 7, 13, 19 };
+    std::vector<T> expectedOutputValues1 { 2, 8, 14, 20 };
+    std::vector<T> expectedOutputValues2 { 3, 9, 15, 21 };
+    std::vector<T> expectedOutputValues3 { 4, 10, 16, 22 };
+    std::vector<T> expectedOutputValues4 { 5, 11, 17, 23 };
+    std::vector<T> expectedOutputValues5 { 6, 12, 18, 24 };
+
+    std::vector<std::vector<T>> expectedOutputValues{ expectedOutputValues0,
+                                                      expectedOutputValues1,
+                                                      expectedOutputValues2,
+                                                      expectedOutputValues3,
+                                                      expectedOutputValues4,
+                                                      expectedOutputValues5 };
+
+    UnpackTest<T>(tflite::BuiltinOperator_UNPACK,
+                  tensorType,
+                  backends,
+                  inputShape,
+                  expectedOutputShape,
+                  inputValues,
+                  expectedOutputValues,
+                  2);
+}
+
+TEST_SUITE("Unpack_CpuRefTests")
+{
+
+// Fp32
+TEST_CASE ("Unpack_Fp32_Axis0_Num4_CpuRef_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Unpack_Fp32_Axis2_Num6_CpuRef_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+// Uint8
+TEST_CASE ("Unpack_Uint8_Axis0_Num4_CpuRef_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+TEST_CASE ("Unpack_Uint8_Axis2_Num6_CpuRef_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+} // End of Unpack_CpuRefTests
+
+TEST_SUITE("Unpack_CpuAccTests")
+{
+
+// Fp32
+TEST_CASE ("Unpack_Fp32_Axis0_Num4_CpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Unpack_Fp32_Axis2_Num6_CpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+// Uint8
+TEST_CASE ("Unpack_Uint8_Axis0_Num4_CpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+TEST_CASE ("Unpack_Uint8_Axis2_Num6_CpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
+UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+} // End of Unpack_CpuAccTests
+
+TEST_SUITE("Unpack_GpuAccTests")
+{
+
+// Fp32
+TEST_CASE ("Unpack_Fp32_Axis0_Num4_GpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+UnpackAxis0Num4Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+TEST_CASE ("Unpack_Fp32_Axis2_Num6_GpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+UnpackAxis2Num6Test<float>(tflite::TensorType_FLOAT32, backends);
+}
+
+// Uint8
+TEST_CASE ("Unpack_Uint8_Axis0_Num4_GpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+UnpackAxis0Num4Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+TEST_CASE ("Unpack_Uint8_Axis2_Num6_GpuAcc_Test")
+{
+std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
+UnpackAxis2Num6Test<uint8_t>(tflite::TensorType_UINT8, backends);
+}
+
+} // End of Unpack_GpuAccTests
+
+// End of Unpack Test Suite
+
+} // namespace armnnDelegate
\ No newline at end of file
diff --git a/delegate/test/UnpackTestHelper.hpp b/delegate/test/UnpackTestHelper.hpp
new file mode 100644
index 0000000..a4c6bc0
--- /dev/null
+++ b/delegate/test/UnpackTestHelper.hpp
@@ -0,0 +1,188 @@
+//
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "TestUtils.hpp"
+
+#include <armnn_delegate.hpp>
+
+#include <flatbuffers/flatbuffers.h>
+#include <tensorflow/lite/interpreter.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/model.h>
+#include <schema_generated.h>
+#include <tensorflow/lite/version.h>
+
+#include <doctest/doctest.h>
+
+#include <string>
+
+namespace
+{
+
+std::vector<char> CreateUnpackTfLiteModel(tflite::BuiltinOperator unpackOperatorCode,
+                                          tflite::TensorType tensorType,
+                                          std::vector<int32_t>& inputTensorShape,
+                                          const std::vector<int32_t>& outputTensorShape,
+                                          const int32_t outputTensorNum,
+                                          unsigned int axis = 0,
+                                          float quantScale = 1.0f,
+                                          int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
+
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
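+    // Buffer 0 is the empty sentinel buffer required by the TfLite schema;
+    // buffer 1 backs the input tensor. One further empty buffer is appended
+    // for each output tensor in the loop below.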
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+        CreateQuantizationParameters(flatBufferBuilder,
+                                     0,
+                                     0,
+                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    const std::vector<int32_t> operatorInputs{ 0 };
+    std::vector<int32_t> operatorOutputs{};
+    const std::vector<int32_t> subgraphInputs{ 0 };
+    std::vector<int32_t> subgraphOutputs{};
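+    // operatorOutputs and subgraphOutputs are filled in below, one entry per
+    // unpacked output tensor.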
+
+    std::vector<flatbuffers::Offset<Tensor>> tensors(outputTensorNum + 1);
+
+    // Create input tensor
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
+                                                                      inputTensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("input"),
+                              quantizationParameters);
+
+    for (int i = 0; i < outputTensorNum; ++i)
+    {
+        tensors[i + 1] = CreateTensor(flatBufferBuilder,
+                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                              outputTensorShape.size()),
+                                      tensorType,
+                                      (i + 2),
+                                      flatBufferBuilder.CreateString("output" + std::to_string(i)),
+                                      quantizationParameters);
+
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        operatorOutputs.push_back(i + 1);
+        subgraphOutputs.push_back(i + 1);
+    }
+
+    // Create the Unpack operator. UnpackOptions carries the number of outputs
+    // (num) and the axis to unpack along.
+    tflite::BuiltinOptions operatorBuiltinOptionsType = tflite::BuiltinOptions_UnpackOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions =
+        CreateUnpackOptions(flatBufferBuilder, outputTensorNum, axis).Union();
+
+    flatbuffers::Offset<Operator> unpackOperator =
+        CreateOperator(flatBufferBuilder,
+                       0,
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       operatorBuiltinOptionsType,
+                       operatorBuiltinOptions);
+
+    flatbuffers::Offset<SubGraph> subgraph =
+        CreateSubGraph(flatBufferBuilder,
+                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(&unpackOperator, 1));
+
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+        flatBufferBuilder.CreateString("ArmnnDelegate: Unpack Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, unpackOperatorCode);
+
+    flatbuffers::Offset<Model> flatbufferModel =
+        CreateModel(flatBufferBuilder,
+                    TFLITE_SCHEMA_VERSION,
+                    flatBufferBuilder.CreateVector(&operatorCode, 1),
+                    flatBufferBuilder.CreateVector(&subgraph, 1),
+                    modelDescription,
+                    flatBufferBuilder.CreateVector(buffers));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void UnpackTest(tflite::BuiltinOperator unpackOperatorCode,
+                tflite::TensorType tensorType,
+                std::vector<armnn::BackendId>& backends,
+                std::vector<int32_t>& inputShape,
+                std::vector<int32_t>& expectedOutputShape,
+                std::vector<T>& inputValues,
+                std::vector<std::vector<T>>& expectedOutputValues,
+                unsigned int axis = 0,
+                float quantScale = 1.0f,
+                int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateUnpackTfLiteModel(unpackOperatorCode,
+                                                            tensorType,
+                                                            inputShape,
+                                                            expectedOutputShape,
+                                                            expectedOutputValues.size(),
+                                                            axis,
+                                                            quantScale,
+                                                            quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+
+    // Create two interpreters from the same model: one to run through the
+    // ArmNN delegate and one as the reference TfLite implementation.
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+              (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
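+    // Wrap the delegate in a unique_ptr with a custom deleter so that
+    // TfLiteArmnnDelegateDelete runs automatically at scope exit.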
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+                                    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                                                     armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
+    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+
+    // Run inference on both interpreters
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    // Compare output data
+    for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
+    {
+        armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
+                                            armnnDelegateInterpreter,
+                                            expectedOutputShape,
+                                            expectedOutputValues[i],
+                                            i);
+    }
+
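+    // Destroy the delegate-backed interpreter explicitly, before
+    // theArmnnDelegate goes out of scope; the interpreter only holds a raw
+    // pointer to the delegate and does not own it.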
+    armnnDelegateInterpreter.reset(nullptr);
+}
+
+} // anonymous namespace
\ No newline at end of file