IVGCVSW-7562 Implement DelegateTestInterpreter for classic delegate

 * Updated all tests to use the new DelegateTestInterpreter.
 * Fixed some unit tests where the shape was incorrect.
 * Added a file identifier to the FlatBufferBuilder, as it is required
   for validation when creating the model using the new API.
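
A typical test helper now drives both runtimes through the new class.
A minimal sketch of the pattern, mirroring the updated
ActivationTestHelper:

    auto interpreter = DelegateTestInterpreter(modelBuffer, backends);
    CHECK(interpreter.AllocateTensors() == kTfLiteOk);
    CHECK(interpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
    CHECK(interpreter.Invoke() == kTfLiteOk);
    std::vector<float>   outputValues = interpreter.GetOutputResult<float>(0);
    std::vector<int32_t> outputShape  = interpreter.GetOutputShape(0);
    interpreter.Cleanup();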

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: I1c4f5464367b35d4528571fa94d14bfaef18fb4d
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index 433cee6..73df68f 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -143,6 +143,9 @@
         test/ConvolutionTestHelper.hpp
         test/DelegateOptionsTest.cpp
         test/DelegateOptionsTestHelper.hpp
+        classic/src/test/DelegateTestInterpreter.cpp
+        common/src/test/DelegateTestInterpreter.hpp
+        common/src/test/DelegateTestInterpreterUtils.hpp
         test/DepthwiseConvolution2dTest.cpp
         test/ElementwiseBinaryTest.cpp
         test/ElementwiseBinaryTestHelper.hpp
@@ -236,6 +239,7 @@
         add_executable(DelegateUnitTests ${armnnDelegate_unittest_sources})
 
         target_include_directories(DelegateUnitTests SYSTEM PRIVATE "${TF_LITE_SCHEMA_INCLUDE_PATH}")
+        target_include_directories(DelegateUnitTests SYSTEM PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/common/src/test")
 
         # Add half library from armnn third-party libraries
         target_link_libraries(DelegateUnitTests PRIVATE thirdparty_headers)
diff --git a/delegate/classic/src/test/DelegateTestInterpreter.cpp b/delegate/classic/src/test/DelegateTestInterpreter.cpp
new file mode 100644
index 0000000..45b6cd0
--- /dev/null
+++ b/delegate/classic/src/test/DelegateTestInterpreter.cpp
@@ -0,0 +1,74 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <DelegateTestInterpreter.hpp>
+
+#include <armnn_delegate.hpp>
+
+namespace delegateTestInterpreter
+{
+
+DelegateTestInterpreter::DelegateTestInterpreter(std::vector<char>& modelBuffer,
+                                                 const std::vector<armnn::BackendId>& backends,
+                                                 const std::string& customOp,
+                                                 bool disableFallback)
+{
+    TfLiteModel* tfLiteModel = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
+
+    TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
+    if (!customOp.empty())
+    {
+        options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
+    }
+
+    // Disable fallback to the TfLite runtime by default for unit tests, unless otherwise specified.
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    delegateOptions.DisableTfLiteRuntimeFallback(disableFallback);
+
+    auto armnnDelegate = armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions);
+    TfLiteInterpreterOptionsAddDelegate(options, armnnDelegate);
+
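+    // Keep a handle to the delegate so that Cleanup() can destroy it after the interpreter.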
+    m_TfLiteDelegate = armnnDelegate;
+    m_TfLiteInterpreter = TfLiteInterpreterCreate(tfLiteModel, options);
+
+    // The options and model can be deleted after the interpreter is created.
+    TfLiteInterpreterOptionsDelete(options);
+    TfLiteModelDelete(tfLiteModel);
+}
+
+DelegateTestInterpreter::DelegateTestInterpreter(std::vector<char>& modelBuffer,
+                                                 const armnnDelegate::DelegateOptions& delegateOptions,
+                                                 const std::string& customOp)
+{
+    TfLiteModel* tfLiteModel = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
+
+    TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
+    if (!customOp.empty())
+    {
+        options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
+    }
+
+    auto armnnDelegate = armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions);
+    TfLiteInterpreterOptionsAddDelegate(options, armnnDelegate);
+
+    m_TfLiteDelegate = armnnDelegate;
+    m_TfLiteInterpreter = TfLiteInterpreterCreate(tfLiteModel, options);
+
+    // The options and model can be deleted after the interpreter is created.
+    TfLiteInterpreterOptionsDelete(options);
+    TfLiteModelDelete(tfLiteModel);
+}
+
+void DelegateTestInterpreter::Cleanup()
+{
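+    // The interpreter must be deleted before the delegate that it uses.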
+    TfLiteInterpreterDelete(m_TfLiteInterpreter);
+
+    if (m_TfLiteDelegate)
+    {
+        armnnDelegate::TfLiteArmnnDelegateDelete(static_cast<TfLiteDelegate*>(m_TfLiteDelegate));
+    }
+}
+
+} // namespace delegateTestInterpreter
\ No newline at end of file
diff --git a/delegate/common/src/test/DelegateTestInterpreter.hpp b/delegate/common/src/test/DelegateTestInterpreter.hpp
new file mode 100644
index 0000000..0b63441
--- /dev/null
+++ b/delegate/common/src/test/DelegateTestInterpreter.hpp
@@ -0,0 +1,175 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <DelegateTestInterpreterUtils.hpp>
+
+#include <armnn_delegate.hpp>
+
+#include <armnn/BackendId.hpp>
+#include <armnn/Exceptions.hpp>
+
+#include <tensorflow/lite/core/c/c_api.h>
+#include <tensorflow/lite/kernels/kernel_util.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
+#include <tensorflow/lite/kernels/register.h>
+#include <tensorflow/lite/c/c_api_internal.h>
+
+namespace delegateTestInterpreter
+{
+
+class DelegateTestInterpreter
+{
+public:
+    /// Create TfLite Interpreter only
+    DelegateTestInterpreter(std::vector<char>& modelBuffer, const std::string& customOp = "")
+    {
+        TfLiteModel* model = delegateTestInterpreter::CreateTfLiteModel(modelBuffer);
+
+        TfLiteInterpreterOptions* options = delegateTestInterpreter::CreateTfLiteInterpreterOptions();
+        if (!customOp.empty())
+        {
+            options->mutable_op_resolver = delegateTestInterpreter::GenerateCustomOpResolver(customOp);
+        }
+
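+        // No delegate is added, so this interpreter runs solely on the reference TfLite runtime.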
+        m_TfLiteInterpreter = TfLiteInterpreterCreate(model, options);
+        m_TfLiteDelegate = nullptr;
+
+        // The options and model can be deleted after the interpreter is created.
+        TfLiteInterpreterOptionsDelete(options);
+        TfLiteModelDelete(model);
+    }
+
+    /// Create Interpreter with default Arm NN Classic/Opaque Delegate applied
+    DelegateTestInterpreter(std::vector<char>& model,
+                            const std::vector<armnn::BackendId>& backends,
+                            const std::string& customOp = "",
+                            bool disableFallback = true);
+
+    /// Create Interpreter with Arm NN Classic/Opaque Delegate applied and DelegateOptions
+    DelegateTestInterpreter(std::vector<char>& model,
+                            const armnnDelegate::DelegateOptions& delegateOptions,
+                            const std::string& customOp = "");
+
+    /// Allocate the TfLiteTensors within the graph.
+    /// This must be called before FillInputTensor(values, index) and Invoke().
+    TfLiteStatus AllocateTensors()
+    {
+        return TfLiteInterpreterAllocateTensors(m_TfLiteInterpreter);
+    }
+
+    /// Copy a buffer of values into an input tensor at a given index.
+    template<typename T>
+    TfLiteStatus FillInputTensor(std::vector<T>& inputValues, int index)
+    {
+        TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter, index);
+        return delegateTestInterpreter::CopyFromBufferToTensor(inputTensor, inputValues);
+    }
+
+    /// Copy a boolean buffer of values into an input tensor at a given index.
+    /// Boolean values are bit-packed inside std::vector<bool>, which does not expose a data() pointer
+    /// to contiguous bools, so the tensor data needs to be written element by element.
+    TfLiteStatus FillInputTensor(std::vector<bool>& inputValues, int index)
+    {
+        TfLiteTensor* inputTensor = delegateTestInterpreter::GetInputTensorFromInterpreter(m_TfLiteInterpreter, index);
+        if(inputTensor->type != kTfLiteBool)
+        {
+            throw armnn::Exception("Input tensor at the given index is not of bool type: " + std::to_string(index));
+        }
+
+        // Make sure there are enough bytes allocated to copy into.
+        if(inputTensor->bytes < inputValues.size() * sizeof(bool))
+        {
+            throw armnn::Exception("Input tensor has not been allocated to match number of input values.");
+        }
+
+        for (unsigned int i = 0; i < inputValues.size(); ++i)
+        {
+            inputTensor->data.b[i] = inputValues[i];
+        }
+
+        return kTfLiteOk;
+    }
+
+    /// Run the interpreter, either on the TfLite runtime or through the Arm NN delegate.
+    /// AllocateTensors() must be called before Invoke().
+    TfLiteStatus Invoke()
+    {
+        return TfLiteInterpreterInvoke(m_TfLiteInterpreter);
+    }
+
+    /// Return a buffer of values from the output tensor at a given index.
+    /// This must be called after Invoke().
+    template<typename T>
+    std::vector<T> GetOutputResult(int index)
+    {
+        const TfLiteTensor* outputTensor =
+                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+
+        int64_t n = tflite::NumElements(outputTensor);
+        std::vector<T> output;
+        output.resize(n);
+
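+        // TfLiteTensorCopyToBuffer fails unless the destination buffer size matches the tensor's byte size.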
+        TfLiteStatus status = TfLiteTensorCopyToBuffer(outputTensor, output.data(), output.size() * sizeof(T));
+        if(status != kTfLiteOk)
+        {
+            throw armnn::Exception("An error occurred when copying output buffer.");
+        }
+
+        return output;
+    }
+
+    /// Return a buffer of values from the output tensor at a given index. This must be called after Invoke().
+    /// Boolean values are bit-packed inside std::vector<bool>, which does not expose a data() pointer
+    /// to contiguous bools, so the tensor data needs to be read element by element.
+    std::vector<bool> GetOutputResult(int index)
+    {
+        const TfLiteTensor* outputTensor =
+                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+        if(outputTensor->type != kTfLiteBool)
+        {
+            throw armnn::Exception("Output tensor at the given index is not of bool type: " + std::to_string(index));
+        }
+
+        int64_t n = tflite::NumElements(outputTensor);
+        std::vector<bool> output(n, false);
+
+        for (unsigned int i = 0; i < output.size(); ++i)
+        {
+            output[i] = outputTensor->data.b[i];
+        }
+        return output;
+    }
+
+    /// Return a buffer of dimensions from the output tensor at a given index.
+    std::vector<int32_t> GetOutputShape(int index)
+    {
+        const TfLiteTensor* outputTensor =
+                delegateTestInterpreter::GetOutputTensorFromInterpreter(m_TfLiteInterpreter, index);
+        int32_t numDims = TfLiteTensorNumDims(outputTensor);
+
+        std::vector<int32_t> dims;
+        dims.reserve(numDims);
+
+        for (int32_t i = 0; i < numDims; ++i)
+        {
+            dims.push_back(TfLiteTensorDim(outputTensor, i));
+        }
+        return dims;
+    }
+
+    /// Delete TfLiteInterpreter and the TfLiteDelegate/TfLiteOpaqueDelegate
+    void Cleanup();
+
+private:
+    TfLiteInterpreter* m_TfLiteInterpreter;
+
+    /// m_TfLiteDelegate can be TfLiteDelegate or TfLiteOpaqueDelegate
+    void* m_TfLiteDelegate;
+};
+
+} // namespace delegateTestInterpreter
\ No newline at end of file
diff --git a/delegate/common/src/test/DelegateTestInterpreterUtils.hpp b/delegate/common/src/test/DelegateTestInterpreterUtils.hpp
new file mode 100644
index 0000000..396c75c
--- /dev/null
+++ b/delegate/common/src/test/DelegateTestInterpreterUtils.hpp
@@ -0,0 +1,110 @@
+//
+// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/Exceptions.hpp>
+
+#include <tensorflow/lite/core/c/c_api.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
+#include <tensorflow/lite/kernels/register.h>
+
+#include <type_traits>
+
+namespace delegateTestInterpreter
+{
+
+inline TfLiteTensor* GetInputTensorFromInterpreter(TfLiteInterpreter* interpreter, int index)
+{
+    TfLiteTensor* inputTensor = TfLiteInterpreterGetInputTensor(interpreter, index);
+    if(inputTensor == nullptr)
+    {
+        throw armnn::Exception("Input tensor was not found at the given index: " + std::to_string(index));
+    }
+    return inputTensor;
+}
+
+inline const TfLiteTensor* GetOutputTensorFromInterpreter(TfLiteInterpreter* interpreter, int index)
+{
+    const TfLiteTensor* outputTensor = TfLiteInterpreterGetOutputTensor(interpreter, index);
+    if(outputTensor == nullptr)
+    {
+        throw armnn::Exception("Output tensor was not found at the given index: " + std::to_string(index));
+    }
+    return outputTensor;
+}
+
+inline TfLiteModel* CreateTfLiteModel(std::vector<char>& data)
+{
+    TfLiteModel* tfLiteModel = TfLiteModelCreate(data.data(), data.size());
+    if(tfLiteModel == nullptr)
+    {
+        throw armnn::Exception("An error has occurred when creating the TfLiteModel.");
+    }
+    return tfLiteModel;
+}
+
+inline TfLiteInterpreterOptions* CreateTfLiteInterpreterOptions()
+{
+    TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
+    if(options == nullptr)
+    {
+        throw armnn::Exception("An error has occurred when creating the TfLiteInterpreterOptions.");
+    }
+    return options;
+}
+
+inline tflite::ops::builtin::BuiltinOpResolver GenerateCustomOpResolver(const std::string& opName)
+{
+    tflite::ops::builtin::BuiltinOpResolver opResolver;
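+    // Register the requested custom op on top of the builtin op resolver.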
+    if (opName == "MaxPool3D")
+    {
+        opResolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
+    }
+    else if (opName == "AveragePool3D")
+    {
+        opResolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
+    }
+    else
+    {
+        throw armnn::Exception("The custom op isn't supported by the DelegateTestInterpreter.");
+    }
+    return opResolver;
+}
+
+template<typename T>
+inline TfLiteStatus CopyFromBufferToTensor(TfLiteTensor* tensor, std::vector<T>& values)
+{
+    // Make sure there are enough bytes allocated to copy into for the uint8_t and int16_t cases.
+    if(tensor->bytes < values.size() * sizeof(T))
+    {
+        throw armnn::Exception("Tensor has not been allocated to match number of values.");
+    }
+
+    // A uint8_t/int16_t specific path is required, as the number of bytes allocated can be larger than
+    // the number of values passed when TFLite tensors of these types are created. Otherwise, the generic
+    // TfLiteTensorCopyFromBuffer function is used, which requires the buffer and tensor sizes to match.
+    TfLiteStatus status = kTfLiteOk;
+    if (std::is_same<T, uint8_t>::value)
+    {
+        for (unsigned int i = 0; i < values.size(); ++i)
+        {
+            tensor->data.uint8[i] = values[i];
+        }
+    }
+    else if (std::is_same<T, int16_t>::value)
+    {
+        for (unsigned int i = 0; i < values.size(); ++i)
+        {
+            tensor->data.i16[i] = values[i];
+        }
+    }
+    else
+    {
+        status = TfLiteTensorCopyFromBuffer(tensor, values.data(), values.size() * sizeof(T));
+    }
+    return status;
+}
+
+} // namespace delegateTestInterpreter
\ No newline at end of file
diff --git a/delegate/test/ActivationTestHelper.hpp b/delegate/test/ActivationTestHelper.hpp
index 110c684..e1901b7 100644
--- a/delegate/test/ActivationTestHelper.hpp
+++ b/delegate/test/ActivationTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -69,7 +69,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -80,51 +80,33 @@
                     std::vector<float>& inputValues,
                     std::vector<float>& expectedOutputValues)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<int32_t> inputShape  { { 4, 1, 4} };
     std::vector<char> modelBuffer = CreateActivationTfLiteModel(activationOperatorCode,
-                                                                      ::tflite::TensorType_FLOAT32,
-                                                                      inputShape);
+                                                                ::tflite::TensorType_FLOAT32,
+                                                                inputShape);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, inputShape);
 
-    // Set input data
-    armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData<float>(tfLiteInterpreter,
-                                            armnnDelegateInterpreter,
-                                            inputShape,
-                                            expectedOutputValues);
-
-    tfLiteInterpreter.reset(nullptr);
-    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ArgMinMaxTestHelper.hpp b/delegate/test/ArgMinMaxTestHelper.hpp
index 91cf1f8..fd230ff 100644
--- a/delegate/test/ArgMinMaxTestHelper.hpp
+++ b/delegate/test/ArgMinMaxTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -119,7 +119,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -139,7 +139,7 @@
                    float quantScale = 1.0f,
                    int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateArgMinMaxTfLiteModel<InputT, OutputT>(argMinMaxOperatorCode,
                                                                                 tensorType,
                                                                                 inputShape,
@@ -150,50 +150,27 @@
                                                                                 quantScale,
                                                                                 quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    CHECK(tfLiteModel != nullptr);
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<InputT>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<OutputT> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<OutputT>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<InputT>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<OutputT> armnnOutputValues = armnnInterpreter.GetOutputResult<OutputT>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    armnnDelegate::CompareOutputData<OutputT>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<InputT>(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput<InputT>(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<OutputT>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<OutputT>(armnnDelegateOutputId);
-
-    for (size_t i = 0; i < expectedOutputValues.size(); i++)
-    {
-        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
-        CHECK(tfLiteDelageOutputData[i] == expectedOutputValues[i]);
-        CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]);
-    }
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/BatchMatMulTestHelper.hpp b/delegate/test/BatchMatMulTestHelper.hpp
index 32b0a4f..d45f438 100644
--- a/delegate/test/BatchMatMulTestHelper.hpp
+++ b/delegate/test/BatchMatMulTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -111,7 +111,7 @@
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -132,7 +132,7 @@
                    float quantScale = 1.0f,
                    int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateBatchMatMulTfLiteModel(bmmOperatorCode,
                                                                  tensorType,
                                                                  LHSInputShape,
@@ -143,62 +143,29 @@
                                                                  quantScale,
                                                                  quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    CHECK(tfLiteModel != nullptr);
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(LHSInputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(RHSInputValues, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(LHSInputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(RHSInputValues, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Set input data
-    auto tfLiteDelegateLHSInputId = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelegateLHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateLHSInputId);
-    auto tfLiteDelegateRHSInputId = tfLiteInterpreter->inputs()[1];
-    auto tfLiteDelegateRHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateRHSInputId);
-    for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
-    {
-        tfLiteDelegateLHSInputData[i] = LHSInputValues[i];
-    }
-    for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
-    {
-        tfLiteDelegateRHSInputData[i] = RHSInputValues[i];
-    }
-
-    auto armnnDelegateLHSInputId = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateLHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateLHSInputId);
-    auto armnnDelegateRHSInputId = armnnDelegateInterpreter->inputs()[1];
-    auto armnnDelegateRHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateRHSInputId);
-    for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
-    {
-        armnnDelegateLHSInputData[i] = LHSInputValues[i];
-    }
-    for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
-    {
-        armnnDelegateRHSInputData[i] = RHSInputValues[i];
-    }
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter,
-                                     outputShape, expectedOutputValues);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
diff --git a/delegate/test/BatchSpaceTestHelper.hpp b/delegate/test/BatchSpaceTestHelper.hpp
index 597139d..ba6afb1 100644
--- a/delegate/test/BatchSpaceTestHelper.hpp
+++ b/delegate/test/BatchSpaceTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -143,7 +143,7 @@
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -162,7 +162,7 @@
                     float quantScale = 1.0f,
                     int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateBatchSpaceTfLiteModel(controlOperatorCode,
                                                                 tensorType,
                                                                 inputShape,
@@ -172,47 +172,27 @@
                                                                 quantScale,
                                                                 quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
-                                        armnnDelegateInterpreter,
-                                        expectedOutputShape,
-                                        expectedOutputValues);
-
-    armnnDelegateInterpreter.reset(nullptr);
-    tfLiteInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/CastTestHelper.hpp b/delegate/test/CastTestHelper.hpp
index be1967c..ac8f033 100644
--- a/delegate/test/CastTestHelper.hpp
+++ b/delegate/test/CastTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -90,7 +90,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
@@ -105,55 +105,34 @@
               float quantScale = 1.0f,
               int quantOffset = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateCastTfLiteModel(inputTensorType,
                                                           outputTensorType,
                                                           shape,
                                                           quantScale,
                                                           quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<K>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<K>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegate) == kTfLiteOk);
-    CHECK(armnnDelegate != nullptr);
-    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<K>       armnnOutputValues = armnnInterpreter.GetOutputResult<K>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteDelegate) == kTfLiteOk);
-    CHECK(tfLiteDelegate != nullptr);
-    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
+    armnnDelegate::CompareOutputData<K>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData<K>(tfLiteDelegate,
-                                        armnnDelegate,
-                                        shape,
-                                        expectedOutputValues,
-                                        0);
-
-    tfLiteDelegate.reset(nullptr);
-    armnnDelegate.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
diff --git a/delegate/test/ComparisonTestHelper.hpp b/delegate/test/ComparisonTestHelper.hpp
index ef9f87a..a1114cb 100644
--- a/delegate/test/ComparisonTestHelper.hpp
+++ b/delegate/test/ComparisonTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -141,7 +141,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -160,7 +160,7 @@
                     float quantScale = 1.0f,
                     int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateComparisonTfLiteModel(comparisonOperatorCode,
                                                                 tensorType,
                                                                 input0Shape,
@@ -169,70 +169,32 @@
                                                                 quantScale,
                                                                 quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<bool>    tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<bool>    armnnOutputValues = armnnInterpreter.GetOutputResult(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    armnnDelegate::CompareData(expectedOutputValues, armnnOutputValues, expectedOutputValues.size());
+    armnnDelegate::CompareData(expectedOutputValues, tfLiteOutputValues, expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues.size());
 
-    // Set input data
-    auto tfLiteDelegateInput0Id = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelageInput0Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput0Id);
-    for (unsigned int i = 0; i < input0Values.size(); ++i)
-    {
-        tfLiteDelageInput0Data[i] = input0Values[i];
-    }
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    auto tfLiteDelegateInput1Id = tfLiteInterpreter->inputs()[1];
-    auto tfLiteDelageInput1Data = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInput1Id);
-    for (unsigned int i = 0; i < input1Values.size(); ++i)
-    {
-        tfLiteDelageInput1Data[i] = input1Values[i];
-    }
-
-    auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput0Id);
-    for (unsigned int i = 0; i < input0Values.size(); ++i)
-    {
-        armnnDelegateInput0Data[i] = input0Values[i];
-    }
-
-    auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
-    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInput1Id);
-    for (unsigned int i = 0; i < input1Values.size(); ++i)
-    {
-        armnnDelegateInput1Data[i] = input1Values[i];
-    }
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<bool>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<bool>(armnnDelegateOutputId);
-
-    armnnDelegate::CompareData(expectedOutputValues  , armnnDelegateOutputData, expectedOutputValues.size());
-    armnnDelegate::CompareData(expectedOutputValues  , tfLiteDelageOutputData , expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelageOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ControlTestHelper.hpp b/delegate/test/ControlTestHelper.hpp
index f68cc07..9e082a7 100644
--- a/delegate/test/ControlTestHelper.hpp
+++ b/delegate/test/ControlTestHelper.hpp
@@ -8,17 +8,15 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
-#include <doctest/doctest.h>
+#include <schema_generated.h>
 
-#include <string>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -108,7 +106,7 @@
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -201,7 +199,7 @@
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -219,7 +217,7 @@
                        float quantScale = 1.0f,
                        int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateConcatTfLiteModel(controlOperatorCode,
                                                             tensorType,
                                                             inputShapes,
@@ -229,51 +227,33 @@
                                                             quantScale,
                                                             quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data for all input tensors.
     for (unsigned int i = 0; i < inputValues.size(); ++i)
     {
-        // Get single input tensor and assign to interpreters.
-        auto inputTensorValues = inputValues[i];
-        armnnDelegate::FillInput<T>(tfLiteInterpreter, i, inputTensorValues);
-        armnnDelegate::FillInput<T>(armnnDelegateInterpreter, i, inputTensorValues);
+        CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues[i], i) == kTfLiteOk);
+        CHECK(armnnInterpreter.FillInputTensor<T>(inputValues[i], i) == kTfLiteOk);
     }
 
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Compare output data
-    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
-                                        armnnDelegateInterpreter,
-                                        expectedOutputShape,
-                                        expectedOutputValues);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    armnnDelegateInterpreter.reset(nullptr);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 template <typename T>
@@ -290,7 +270,7 @@
               float quantScale = 1.0f,
               int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateMeanTfLiteModel(controlOperatorCode,
                                                           tensorType,
                                                           input0Shape,
@@ -301,46 +281,27 @@
                                                           quantScale,
                                                           quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Setup interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Setup interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
-    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
-                                        armnnDelegateInterpreter,
-                                        expectedOutputShape,
-                                        expectedOutputValues);
-
-    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
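
Note on the pattern: every helper refactored in this patch follows the same shape, so it is worth spelling out once. The sketch below is illustrative only — it assumes the DelegateTestInterpreter/TestUtils headers added by this patch, and RunAndCompare is a hypothetical name; the real helpers keep their operator-specific signatures and model builders.

    #include "TestUtils.hpp"
    #include <armnn_delegate.hpp>
    #include <DelegateTestInterpreter.hpp>
    #include <doctest/doctest.h>
    #include <vector>

    template <typename T>
    void RunAndCompare(std::vector<char>& modelBuffer,                 // serialized TFLite model
                       const std::vector<armnn::BackendId>& backends,
                       std::vector<T>& inputValues,
                       std::vector<T>& expectedOutputValues,
                       std::vector<int32_t>& expectedOutputShape)
    {
        using namespace delegateTestInterpreter;

        // Reference run on the stock TFLite Runtime.
        auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
        CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
        CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
        std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

        // Same model again, this time with the Arm NN delegate applied.
        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
        CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
        CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
        std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

        // The two runs must agree with each other and with the expected data.
        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

        tfLiteInterpreter.Cleanup();
        armnnInterpreter.Cleanup();
    }
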
diff --git a/delegate/test/ConvolutionTestHelper.hpp b/delegate/test/ConvolutionTestHelper.hpp
index 2e211b2..6a3400e 100644
--- a/delegate/test/ConvolutionTestHelper.hpp
+++ b/delegate/test/ConvolutionTestHelper.hpp
@@ -8,6 +8,7 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
 #include <tensorflow/lite/interpreter.h>
@@ -186,7 +187,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -222,10 +223,9 @@
                      int32_t filterQuantizationDim = 3)
 
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
 
     std::vector<char> modelBuffer;
-
     modelBuffer = CreateConv2dTfLiteModel(convolutionOperatorCode,
                                           tensorType,
                                           strideX,
@@ -251,59 +251,27 @@
                                           depth_multiplier,
                                           filterQuantizationDim);
 
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelageInputData[i] = inputValues[i];
-    }
-
-    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        armnnDelegateInputData[i] = inputValues[i];
-    }
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-    for (size_t i = 0; i < expectedOutputValues.size(); i++)
-    {
-        CHECK(tfLiteDelagateOutputData[i] == armnnDelegateOutputData[i]);
-        CHECK(doctest::Approx(tfLiteDelagateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]);
-        CHECK(doctest::Approx(armnnDelegateOutputData[i]).epsilon(0.000001f) == expectedOutputValues[i]);
-    }
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 // Conv3d is only correctly supported for external delegates from TF Lite v2.6, as there was a breaking bug in v2.5.
@@ -457,7 +425,7 @@
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -490,7 +458,7 @@
                        int32_t depth_multiplier = 1,
                        int32_t filterQuantizationDim = 3)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
 
     std::vector<char> modelBuffer;
     modelBuffer = CreateConv3dTfLiteModel(convolutionOperatorCode,
@@ -516,48 +484,30 @@
                                           depth_multiplier,
                                           filterQuantizationDim);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
+    armnnDelegate::CompareData(expectedOutputValues.data(), armnnOutputValues.data(), expectedOutputValues.size(), 1);
+    armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteOutputValues.data(), expectedOutputValues.size(), 1);
+    armnnDelegate::CompareData(tfLiteOutputValues.data(), armnnOutputValues.data(), expectedOutputValues.size(), 1);
 
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-
-    armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size(), 1);
-    armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData, expectedOutputValues.size(), 1);
-    armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, expectedOutputValues.size(), 1);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 #endif
 
@@ -675,7 +625,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -702,7 +652,7 @@
                        float quantScale = 1.0f,
                        int quantOffset = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
 
     std::vector<char> modelBuffer;
     modelBuffer = CreateTransposeConvTfLiteModel<T>(tensorType,
@@ -723,58 +673,27 @@
                                                     quantOffset);
 
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 2) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 2) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape);
 
-    // Set input data
-    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[2];
-    auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelageInputData[i] = inputValues[i];
-    }
-
-    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[2];
-    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        armnnDelegateInputData[i] = inputValues[i];
-    }
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-    for (size_t i = 0; i < expectedOutputValues.size(); i++)
-    {
-        CHECK(armnnDelegateOutputData[i] == expectedOutputValues[i]);
-        CHECK(tfLiteDelagateOutputData[i] == expectedOutputValues[i]);
-        CHECK(tfLiteDelagateOutputData[i] == armnnDelegateOutputData[i]);
-    }
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
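
Note on the Finish() changes above: passing armnnDelegate::FILE_IDENTIFIER stamps the 4-byte FlatBuffers file identifier into the serialized model, which the C-API model loading used by DelegateTestInterpreter validates. A minimal sketch, assuming the constant resolves to TFLite's standard "TFL3" identifier — an assumption from the TFLite schema, not something this diff shows:

    #include <flatbuffers/flatbuffers.h>
    #include <schema_generated.h>
    #include <vector>

    std::vector<char> FinishModel(flatbuffers::FlatBufferBuilder& flatBufferBuilder,
                                  flatbuffers::Offset<tflite::Model> flatbufferModel)
    {
        // Without the identifier, flatbuffers::BufferHasIdentifier() returns
        // false and model validation can reject an otherwise valid buffer.
        flatBufferBuilder.Finish(flatbufferModel, "TFL3"); // assumed value of FILE_IDENTIFIER

        return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                                 flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
    }
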
diff --git a/delegate/test/DelegateOptionsTest.cpp b/delegate/test/DelegateOptionsTest.cpp
index ecd8c73..d84d420 100644
--- a/delegate/test/DelegateOptionsTest.cpp
+++ b/delegate/test/DelegateOptionsTest.cpp
@@ -30,7 +30,6 @@
         armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
         DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
-                                  backends,
                                   tensorShape,
                                   inputData,
                                   inputData,
@@ -60,7 +59,6 @@
         armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
         DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
-                                  backends,
                                   tensorShape,
                                   inputData,
                                   inputData,
@@ -104,7 +102,6 @@
     CHECK(!callback);
 
     DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
-                              backends,
                               tensorShape,
                               inputData,
                               inputData,
@@ -118,7 +115,7 @@
 
 TEST_CASE ("ArmnnDelegateOptimizerOptionsImport")
 {
-    std::vector<armnn::BackendId> backends = {  armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
     std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
     std::vector<uint8_t> inputData = { 1, 2, 3, 4 };
     std::vector<uint8_t> divData = { 2, 2, 3, 4 };
@@ -128,7 +125,6 @@
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
     DelegateOptionTest<uint8_t>(::tflite::TensorType_UINT8,
-                                backends,
                                 tensorShape,
                                 inputData,
                                 inputData,
@@ -164,7 +160,6 @@
 
     armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
     DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
-                                        backends,
                                         tensorShape,
                                         inputData,
                                         expectedResult,
@@ -200,7 +195,6 @@
 
     armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
     DelegateOptionNoFallbackTest<float>(::tflite::TensorType_FLOAT32,
-                                        backends,
                                         tensorShape,
                                         inputData,
                                         expectedResult,
@@ -237,7 +231,6 @@
     armnnDelegate::DelegateOptions delegateOptions(backends, optimizerOptions);
 
     DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
-                              backends,
                               tensorShape,
                               inputData,
                               inputData,
@@ -268,7 +261,6 @@
         // Enable serialize to dot by specifying the target file name.
         delegateOptions.SetSerializeToDot(filename);
         DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
-                                  backends,
                                   tensorShape,
                                   inputData,
                                   inputData,
@@ -309,7 +301,6 @@
 
     armnnDelegate::DelegateOptions delegateOptions(options_keys.get(), options_values.get(), num_options, nullptr);
     DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
-                              backends,
                               tensorShape,
                               inputData,
                               inputData,
diff --git a/delegate/test/DelegateOptionsTestHelper.hpp b/delegate/test/DelegateOptionsTestHelper.hpp
index fb5403c..b6974c9 100644
--- a/delegate/test/DelegateOptionsTestHelper.hpp
+++ b/delegate/test/DelegateOptionsTestHelper.hpp
@@ -5,17 +5,17 @@
 
 #pragma once
 
-#include <armnn_delegate.hpp>
-
 #include "TestUtils.hpp"
 
+#include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
+
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -146,7 +146,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -218,14 +218,13 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
 
 template <typename T>
 void DelegateOptionTest(tflite::TensorType tensorType,
-                        const std::vector<armnn::BackendId>& backends,
                         std::vector<int32_t>& tensorShape,
                         std::vector<T>& input0Values,
                         std::vector<T>& input1Values,
@@ -235,55 +234,41 @@
                         float quantScale = 1.0f,
                         int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateAddDivTfLiteModel(tensorType,
                                                             tensorShape,
                                                             quantScale,
                                                             quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(input2Values, 2) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(input2Values, 2) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);
 
-    // Set input data
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
-    armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
-    armnnDelegate::FillInput(tfLiteInterpreter, 2, input2Values);
-
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 2, input2Values);
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
-
-    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 template <typename T>
 void DelegateOptionNoFallbackTest(tflite::TensorType tensorType,
-                                  const std::vector<armnn::BackendId>& backends,
                                   std::vector<int32_t>& tensorShape,
                                   std::vector<T>& inputValues,
                                   std::vector<T>& expectedOutputValues,
@@ -291,53 +276,39 @@
                                   float quantScale = 1.0f,
                                   int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateCeilTfLiteModel(tensorType,
                                                           tensorShape,
                                                           quantScale,
                                                           quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
+    tfLiteInterpreter.Cleanup();
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
     try
     {
-        armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());
+        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, delegateOptions);
+        CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+        CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+        CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+        std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
+        armnnInterpreter.Cleanup();
+
+        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);
     }
     catch (const armnn::Exception& e)
     {
         // Forward the exception message to std::cout
         std::cout << e.what() << std::endl;
     }
-
-    // Set input data
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
-
-    armnnDelegateInterpreter.reset(nullptr);
 }
 
 } // anonymous namespace
\ No newline at end of file
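
Note on the signature change above: the backends argument is gone because backend selection now travels inside the DelegateOptions object, which DelegateTestInterpreter accepts directly. A hedged usage sketch — data values are illustrative and assume the AddDiv graph computes (input0 + input1) / input2, as the helper's name suggests:

    TEST_CASE ("ArmnnDelegateOptionsSketch")
    {
        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
        std::vector<int32_t> tensorShape { 1, 2, 2, 1 };
        std::vector<float>   inputData   { 1.f, 2.f, 3.f, 4.f };
        std::vector<float>   divData     { 2.f, 2.f, 3.f, 4.f };
        std::vector<float>   expected    { 1.f, 2.f, 2.f, 2.f }; // (in + in) / div

        // Backends ride along in the options; no separate parameter needed.
        armnnDelegate::DelegateOptions delegateOptions(backends);
        DelegateOptionTest<float>(::tflite::TensorType_FLOAT32,
                                  tensorShape,
                                  inputData,  // input0
                                  inputData,  // input1
                                  divData,    // input2
                                  expected,
                                  delegateOptions);
    }
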
diff --git a/delegate/test/DepthwiseConvolution2dTest.cpp b/delegate/test/DepthwiseConvolution2dTest.cpp
index 9ee589c..5fdbfc4 100644
--- a/delegate/test/DepthwiseConvolution2dTest.cpp
+++ b/delegate/test/DepthwiseConvolution2dTest.cpp
@@ -25,7 +25,7 @@
     std::vector<int32_t> inputShape { 1, 3, 2, 2 };
     std::vector<int32_t> filterShape { 1, 2, 2, 4 };
     std::vector<int32_t> biasShape { 4 };
-    std::vector<int32_t> outputShape { 1, 3, 3, 1 };
+    std::vector<int32_t> outputShape { 1, 2, 1, 4 };
 
     static std::vector<float> inputValues =
         {
diff --git a/delegate/test/ElementwiseBinaryTestHelper.hpp b/delegate/test/ElementwiseBinaryTestHelper.hpp
index 47ee7c2..fa9cbb8 100644
--- a/delegate/test/ElementwiseBinaryTestHelper.hpp
+++ b/delegate/test/ElementwiseBinaryTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -164,7 +164,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -185,7 +185,7 @@
                            int quantOffset  = 0,
                            bool constantInput = false)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateElementwiseBinaryTfLiteModel<T>(binaryOperatorCode,
                                                                           activationType,
                                                                           tensorType,
@@ -197,47 +197,29 @@
                                                                           quantScale,
                                                                           quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr <Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr <Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(input1Values, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                     armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
-    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);
-    if (!constantInput)
-    {
-        armnnDelegate::FillInput<T>(tfLiteInterpreter, 1, input1Values);
-        armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 1, input1Values);
-    }
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
-                                        armnnDelegateInterpreter,
-                                        outputShape,
-                                        expectedOutputValues);
-    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ElementwiseUnaryTestHelper.hpp b/delegate/test/ElementwiseUnaryTestHelper.hpp
index f6a534a..7f8879b 100644
--- a/delegate/test/ElementwiseUnaryTestHelper.hpp
+++ b/delegate/test/ElementwiseUnaryTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -69,7 +69,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -80,48 +80,33 @@
                               std::vector<float>& inputValues,
                               std::vector<float>& expectedOutputValues)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<int32_t> inputShape  { { 3, 1, 2} };
     std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
                                                                       ::tflite::TensorType_FLOAT32,
                                                                       inputShape);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, inputShape);
 
-    // Set input data
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, inputShape, expectedOutputValues);
-
-    armnnDelegateInterpreter.reset(nullptr);
-    tfLiteInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 void ElementwiseUnaryBoolTest(tflite::BuiltinOperator unaryOperatorCode,
@@ -130,56 +115,35 @@
                               std::vector<bool>& inputValues,
                               std::vector<bool>& expectedOutputValues)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateElementwiseUnaryTfLiteModel(unaryOperatorCode,
                                                                       ::tflite::TensorType_BOOL,
                                                                       inputShape);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<bool>    tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<bool>    armnnOutputValues = armnnInterpreter.GetOutputResult(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
+    armnnDelegate::CompareData(expectedOutputValues, armnnOutputValues, expectedOutputValues.size());
+    armnnDelegate::CompareData(expectedOutputValues, tfLiteOutputValues, expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues.size());
 
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, inputShape);
 
-    // Set input data
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, inputValues);
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data, comparing Boolean values is handled differently and needs to call the CompareData function
-    // directly instead. This is because Boolean types get converted to a bit representation in a vector.
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<bool>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<bool>(armnnDelegateOutputId);
-
-    armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size());
-    armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
-
-    armnnDelegateInterpreter.reset(nullptr);
-    tfLiteInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
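
Note on the boolean path above: CompareData is called directly because std::vector<bool> is a bit-packed specialisation — it has no .data() member and operator[] returns a proxy, so there is no contiguous bool* to hand to a bulk comparison. A tiny self-contained illustration of why element-wise access is required:

    #include <cassert>
    #include <vector>

    int main()
    {
        std::vector<bool> tfLiteOutputValues { true, false, true };
        std::vector<bool> armnnOutputValues  { true, false, true };

        // No memcmp/pointer tricks possible on bit-packed storage;
        // compare one element at a time.
        assert(tfLiteOutputValues.size() == armnnOutputValues.size());
        for (std::size_t i = 0; i < tfLiteOutputValues.size(); ++i)
        {
            assert(tfLiteOutputValues[i] == armnnOutputValues[i]);
        }
        return 0;
    }
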
diff --git a/delegate/test/FillTestHelper.hpp b/delegate/test/FillTestHelper.hpp
index c8aadb0..70162c4 100644
--- a/delegate/test/FillTestHelper.hpp
+++ b/delegate/test/FillTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -102,7 +102,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -118,42 +118,32 @@
               std::vector<T>& expectedOutputValues,
               T fillValue)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateFillTfLiteModel<T>(fillOperatorCode,
                                                              tensorType,
                                                              inputShape,
                                                              tensorShape,
                                                              {fillValue});
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    CHECK(tfLiteModel != nullptr);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, tensorShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, tensorShape, expectedOutputValues);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
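
Note on the FILL helper above: the dims tensor and the fill value are baked into the model as constant tensors by CreateFillTfLiteModel, so there is no runtime input to populate — hence no FillInputTensor calls before Invoke(). An illustrative call, with the parameter order inferred from the call sites in this hunk (treat it as an assumption, not the verified signature):

    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
    std::vector<int32_t> inputShape  { 2 };       // dims tensor holds two entries
    std::vector<int32_t> tensorShape { 2, 3 };    // the shape being filled
    std::vector<float>   expectedOutputValues(6, 7.0f);

    FillTest<float>(tflite::BuiltinOperator_FILL,
                    ::tflite::TensorType_FLOAT32,
                    backends,
                    inputShape,
                    tensorShape,
                    expectedOutputValues,
                    7.0f); // fillValue
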
diff --git a/delegate/test/FullyConnectedTestHelper.hpp b/delegate/test/FullyConnectedTestHelper.hpp
index d6bbd93..e9e5c09 100644
--- a/delegate/test/FullyConnectedTestHelper.hpp
+++ b/delegate/test/FullyConnectedTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -159,7 +159,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -180,7 +180,7 @@
                         float quantScale = 1.0f,
                         int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
 
     std::vector<char> modelBuffer = CreateFullyConnectedTfLiteModel(tensorType,
                                                                     activationType,
@@ -192,64 +192,50 @@
                                                                     constantWeights,
                                                                     quantScale,
                                                                     quantOffset);
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                     armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
 
     if (!constantWeights)
     {
-        armnnDelegate::FillInput<T>(tfLiteInterpreter, 1, weightsData);
-        armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 1, weightsData);
+        CHECK(tfLiteInterpreter.FillInputTensor<T>(weightsData, 1) == kTfLiteOk);
+        CHECK(armnnInterpreter.FillInputTensor<T>(weightsData, 1) == kTfLiteOk);
 
         if (tensorType == ::tflite::TensorType_INT8)
         {
             std::vector <int32_t> biasData = {10};
-            armnnDelegate::FillInput<int32_t>(tfLiteInterpreter, 2, biasData);
-            armnnDelegate::FillInput<int32_t>(armnnDelegateInterpreter, 2, biasData);
+            CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(biasData, 2) == kTfLiteOk);
+            CHECK(armnnInterpreter.FillInputTensor<int32_t>(biasData, 2) == kTfLiteOk);
         }
         else
         {
             std::vector<float> biasData = {10};
-            armnnDelegate::FillInput<float>(tfLiteInterpreter, 2, biasData);
-            armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 2, biasData);
+            CHECK(tfLiteInterpreter.FillInputTensor<float>(biasData, 2) == kTfLiteOk);
+            CHECK(armnnInterpreter.FillInputTensor<float>(biasData, 2) == kTfLiteOk);
         }
     }
 
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Compare output data
-    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
-                                        armnnDelegateInterpreter,
-                                        outputTensorShape,
-                                        expectedOutputValues);
-    armnnDelegateInterpreter.reset(nullptr);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
+
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape);
+
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
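
Note on the non-constant-weights branch above: when weights are runtime inputs, the bias stays a runtime input too, and its element type tracks the quantisation scheme — INT8 FullyConnected takes an int32 bias whose scale is conventionally input_scale * weight_scale in TFLite. A small sketch of that arithmetic (scale values are illustrative, not taken from this patch):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // TFLite convention for quantised FullyConnected/Conv biases:
        // bias_scale = input_scale * weight_scale, bias zero point = 0.
        const float inputScale  = 0.5f;   // illustrative
        const float weightScale = 0.25f;  // illustrative
        const float biasScale   = inputScale * weightScale;

        const float   realBias      = 10.0f; // matches the {10} used above
        const int32_t quantisedBias = static_cast<int32_t>(std::round(realBias / biasScale));

        std::printf("bias %.1f -> q%d at scale %f\n", realBias, quantisedBias, biasScale);
        return 0;
    }
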
diff --git a/delegate/test/GatherNdTestHelper.hpp b/delegate/test/GatherNdTestHelper.hpp
index 7b1595b..604b215 100644
--- a/delegate/test/GatherNdTestHelper.hpp
+++ b/delegate/test/GatherNdTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -108,7 +108,7 @@
                                                modelDescription,
                                                flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -126,56 +126,35 @@
                 float quantScale = 1.0f,
                 int quantOffset = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateGatherNdTfLiteModel(tensorType,
                                                             paramsShape,
                                                             indicesShape,
                                                             expectedOutputShape,
                                                             quantScale,
                                                             quantOffset);
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(paramsValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(indicesValues, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegate) == kTfLiteOk);
-    CHECK(armnnDelegate != nullptr);
-    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(paramsValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<int32_t>(indicesValues, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteDelegate) == kTfLiteOk);
-    CHECK(tfLiteDelegate != nullptr);
-    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                     armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, paramsValues);
-    armnnDelegate::FillInput<T>(armnnDelegate, 0, paramsValues);
-    armnnDelegate::FillInput<int32_t>(tfLiteDelegate, 1, indicesValues);
-    armnnDelegate::FillInput<int32_t>(armnnDelegate, 1, indicesValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
-                                        armnnDelegate,
-                                        expectedOutputShape,
-                                        expectedOutputValues,
-                                        0);
-
-    tfLiteDelegate.reset(nullptr);
-    armnnDelegate.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 } // anonymous namespace
\ No newline at end of file
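
Each helper also swaps the one-argument flatBufferBuilder.Finish(flatbufferModel) for the two-argument form with armnnDelegate::FILE_IDENTIFIER. The two-argument Finish embeds a 4-byte identifier after the root offset, and model verification on load checks for it, so buffers finished without it are rejected. A minimal sketch of that check, assuming FILE_IDENTIFIER resolves to the standard TFLite identifier generated into schema_generated.h:

#include <flatbuffers/flatbuffers.h>
#include <schema_generated.h>

#include <cstdint>
#include <vector>

// Returns true when the buffer verifies against the TFLite schema and also
// carries the file identifier that the two-argument Finish() embeds.
bool IsValidTfLiteModelBuffer(const std::vector<char>& buffer)
{
    flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t*>(buffer.data()),
                                   buffer.size());
    return tflite::VerifyModelBuffer(verifier) &&
           tflite::ModelBufferHasIdentifier(buffer.data());
}
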
diff --git a/delegate/test/GatherTestHelper.hpp b/delegate/test/GatherTestHelper.hpp
index 41e3b55..43717a3 100644
--- a/delegate/test/GatherTestHelper.hpp
+++ b/delegate/test/GatherTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -109,7 +109,7 @@
                                                modelDescription,
                                                flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -128,7 +128,7 @@
                 float quantScale = 1.0f,
                 int quantOffset = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateGatherTfLiteModel(tensorType,
                                                             paramsShape,
                                                             indicesShape,
@@ -136,49 +136,28 @@
                                                             axis,
                                                             quantScale,
                                                             quantOffset);
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(paramsValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(indicesValues, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegate) == kTfLiteOk);
-    CHECK(armnnDelegate != nullptr);
-    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(paramsValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<int32_t>(indicesValues, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteDelegate) == kTfLiteOk);
-    CHECK(tfLiteDelegate != nullptr);
-    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                     armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, paramsValues);
-    armnnDelegate::FillInput<T>(armnnDelegate, 0, paramsValues);
-    armnnDelegate::FillInput<int32_t>(tfLiteDelegate, 1, indicesValues);
-    armnnDelegate::FillInput<int32_t>(armnnDelegate, 1, indicesValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
-                                        armnnDelegate,
-                                        expectedOutputShape,
-                                        expectedOutputValues,
-                                        0);
-
-    tfLiteDelegate.reset(nullptr);
-    armnnDelegate.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/LogicalTest.cpp b/delegate/test/LogicalTest.cpp
index 57bbd31..8414293 100644
--- a/delegate/test/LogicalTest.cpp
+++ b/delegate/test/LogicalTest.cpp
@@ -27,15 +27,15 @@
     std::vector<bool> input1Values { 0, 1, 0, 1 };
     std::vector<bool> expectedOutputValues { 0, 0, 0, 1 };
 
-    LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_AND,
-                            ::tflite::TensorType_BOOL,
-                            backends,
-                            input0Shape,
-                            input1Shape,
-                            expectedOutputShape,
-                            input0Values,
-                            input1Values,
-                            expectedOutputValues);
+    LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_AND,
+                      ::tflite::TensorType_BOOL,
+                      backends,
+                      input0Shape,
+                      input1Shape,
+                      expectedOutputShape,
+                      input0Values,
+                      input1Values,
+                      expectedOutputValues);
 }
 
 void LogicalBinaryAndBroadcastTest(std::vector<armnn::BackendId>& backends)
@@ -48,15 +48,15 @@
     std::vector<bool> input1Values { 1 };
     std::vector<bool> expectedOutputValues { 0, 1, 0, 1 };
 
-    LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_AND,
-                            ::tflite::TensorType_BOOL,
-                            backends,
-                            input0Shape,
-                            input1Shape,
-                            expectedOutputShape,
-                            input0Values,
-                            input1Values,
-                            expectedOutputValues);
+    LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_AND,
+                      ::tflite::TensorType_BOOL,
+                      backends,
+                      input0Shape,
+                      input1Shape,
+                      expectedOutputShape,
+                      input0Values,
+                      input1Values,
+                      expectedOutputValues);
 }
 
 void LogicalBinaryOrBoolTest(std::vector<armnn::BackendId>& backends)
@@ -69,15 +69,15 @@
     std::vector<bool> input1Values { 0, 1, 0, 1 };
     std::vector<bool> expectedOutputValues { 0, 1, 1, 1 };
 
-    LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_OR,
-                            ::tflite::TensorType_BOOL,
-                            backends,
-                            input0Shape,
-                            input1Shape,
-                            expectedOutputShape,
-                            input0Values,
-                            input1Values,
-                            expectedOutputValues);
+    LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_OR,
+                      ::tflite::TensorType_BOOL,
+                      backends,
+                      input0Shape,
+                      input1Shape,
+                      expectedOutputShape,
+                      input0Values,
+                      input1Values,
+                      expectedOutputValues);
 }
 
 void LogicalBinaryOrBroadcastTest(std::vector<armnn::BackendId>& backends)
@@ -90,15 +90,15 @@
     std::vector<bool> input1Values { 1 };
     std::vector<bool> expectedOutputValues { 1, 1, 1, 1 };
 
-    LogicalBinaryTest<bool>(tflite::BuiltinOperator_LOGICAL_OR,
-                            ::tflite::TensorType_BOOL,
-                            backends,
-                            input0Shape,
-                            input1Shape,
-                            expectedOutputShape,
-                            input0Values,
-                            input1Values,
-                            expectedOutputValues);
+    LogicalBinaryTest(tflite::BuiltinOperator_LOGICAL_OR,
+                      ::tflite::TensorType_BOOL,
+                      backends,
+                      input0Shape,
+                      input1Shape,
+                      expectedOutputShape,
+                      input0Values,
+                      input1Values,
+                      expectedOutputValues);
 }
 
 // The LogicalNot operator uses the ElementwiseUnary layer and descriptor but is still classed as a logical operator.
diff --git a/delegate/test/LogicalTestHelper.hpp b/delegate/test/LogicalTestHelper.hpp
index 2f2ae7b..7da8ad9 100644
--- a/delegate/test/LogicalTestHelper.hpp
+++ b/delegate/test/LogicalTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -120,26 +120,25 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
 
-template <typename T>
 void LogicalBinaryTest(tflite::BuiltinOperator logicalOperatorCode,
                        tflite::TensorType tensorType,
                        std::vector<armnn::BackendId>& backends,
                        std::vector<int32_t>& input0Shape,
                        std::vector<int32_t>& input1Shape,
                        std::vector<int32_t>& expectedOutputShape,
-                       std::vector<T>& input0Values,
-                       std::vector<T>& input1Values,
-                       std::vector<T>& expectedOutputValues,
+                       std::vector<bool>& input0Values,
+                       std::vector<bool>& input1Values,
+                       std::vector<bool>& expectedOutputValues,
                        float quantScale = 1.0f,
                        int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateLogicalBinaryTfLiteModel(logicalOperatorCode,
                                                                    tensorType,
                                                                    input0Shape,
@@ -148,54 +147,32 @@
                                                                    quantScale,
                                                                    quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<bool>    tfLiteOutputValues = tfLiteInterpreter.GetOutputResult(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor(input0Values, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor(input1Values, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<bool>    armnnOutputValues = armnnInterpreter.GetOutputResult(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
 
-    // Set input data for the armnn interpreter
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input0Values);
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input1Values);
+    armnnDelegate::CompareData(expectedOutputValues, armnnOutputValues, expectedOutputValues.size());
+    armnnDelegate::CompareData(expectedOutputValues, tfLiteOutputValues, expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteOutputValues, armnnOutputValues, expectedOutputValues.size());
 
-    // Set input data for the tflite interpreter
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, input0Values);
-    armnnDelegate::FillInput(tfLiteInterpreter, 1, input1Values);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data, comparing Boolean values is handled differently and needs to call the CompareData function
-    // directly. This is because Boolean types get converted to a bit representation in a vector.
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-
-    armnnDelegate::CompareData(expectedOutputValues, armnnDelegateOutputData, expectedOutputValues.size());
-    armnnDelegate::CompareData(expectedOutputValues, tfLiteDelegateOutputData, expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
-
-    armnnDelegateInterpreter.reset(nullptr);
-    tfLiteInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
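
LogicalBinaryTest drops its template parameter because it only ever ran on bool, and bool is the one type a generic tensor copy cannot serve: std::vector<bool> is bit-packed, so tensor memory cannot be copied into or out of it wholesale. A sketch of how the non-template bool overloads are assumed to read an output through the TFLite C API, converting element by element:

#include <tensorflow/lite/c/c_api.h>

#include <cstddef>
#include <vector>

// Hypothetical helper: copies a boolean output tensor into std::vector<bool>.
// The range constructor converts each element individually, which is required
// because the vector stores bits rather than addressable bools.
std::vector<bool> CopyBoolOutput(const TfLiteTensor* tensor)
{
    const bool* data = static_cast<const bool*>(TfLiteTensorData(tensor));
    std::size_t count = TfLiteTensorByteSize(tensor) / sizeof(bool);
    return std::vector<bool>(data, data + count);
}
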
diff --git a/delegate/test/LstmTestHelper.hpp b/delegate/test/LstmTestHelper.hpp
index 14776ca..4ff5175 100644
--- a/delegate/test/LstmTestHelper.hpp
+++ b/delegate/test/LstmTestHelper.hpp
@@ -8,14 +8,13 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
-#include <tensorflow/lite/c/common.h>
+
+#include <schema_generated.h>
 
 #include <doctest/doctest.h>
 
@@ -539,7 +538,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -591,7 +590,7 @@
                   float clippingThresCell,
                   float clippingThresProj)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
 
     std::vector<char> modelBuffer = CreateLstmTfLiteModel(tensorType,
                                                           batchSize,
@@ -635,57 +634,29 @@
                                                           clippingThresCell,
                                                           clippingThresProj);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    std::vector<int32_t> expectedOutputShape {batchSize, outputSize};
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                     armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Set input data
-    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelageInputData[i] = inputValues[i];
-    }
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
 
-    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        armnnDelegateInputData[i] = inputValues[i];
-    }
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-
-    armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size());
-    armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData, expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
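
Besides the interpreter migration, the Lstm helper gains a shape check it previously lacked: expectedOutputShape is derived as {batchSize, outputSize} and passed to CompareOutputShape. The assumed semantics of that helper, consistent with every call site in this patch (it presumably lives alongside the other comparison utilities in TestUtils.hpp):

#include <doctest/doctest.h>

#include <cstddef>
#include <cstdint>
#include <vector>

// Assumed behaviour: both runtimes must report exactly the expected shape.
void CompareOutputShape(const std::vector<int32_t>& tfLiteShape,
                        const std::vector<int32_t>& armnnShape,
                        const std::vector<int32_t>& expectedShape)
{
    CHECK(tfLiteShape.size() == expectedShape.size());
    CHECK(armnnShape.size() == expectedShape.size());
    for (std::size_t i = 0; i < expectedShape.size(); ++i)
    {
        CHECK(tfLiteShape[i] == expectedShape[i]);
        CHECK(armnnShape[i] == expectedShape[i]);
    }
}
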
diff --git a/delegate/test/NormalizationTestHelper.hpp b/delegate/test/NormalizationTestHelper.hpp
index eafdf84..a9db6b8 100644
--- a/delegate/test/NormalizationTestHelper.hpp
+++ b/delegate/test/NormalizationTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -110,7 +110,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -131,7 +131,7 @@
                        float quantScale = 1.0f,
                        int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateNormalizationTfLiteModel(normalizationOperatorCode,
                                                                    tensorType,
                                                                    inputShape,
@@ -143,40 +143,27 @@
                                                                    quantScale,
                                                                    quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    CHECK(tfLiteModel != nullptr);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 void L2NormalizationTest(std::vector<armnn::BackendId>& backends)
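
The old interpreter-based CompareOutputData overload gives way to a vector-based one throughout this patch. Its assumed behaviour, inferred from how the helpers call it, is that both runtimes must agree with the expected values and with each other. The real helper likely applies a tolerance for floating-point types; exact equality is shown only to keep the sketch short.

#include <doctest/doctest.h>

#include <cstddef>
#include <vector>

// Assumed semantics of the three-vector overload (a sketch, not the real code).
template <typename T>
void CompareOutputData(std::vector<T>& tfLiteValues,
                       std::vector<T>& armnnValues,
                       std::vector<T>& expectedValues)
{
    CHECK(tfLiteValues.size() == expectedValues.size());
    CHECK(armnnValues.size() == expectedValues.size());
    for (std::size_t i = 0; i < expectedValues.size(); ++i)
    {
        CHECK(tfLiteValues[i] == expectedValues[i]);
        CHECK(armnnValues[i] == expectedValues[i]);
        CHECK(tfLiteValues[i] == armnnValues[i]);
    }
}
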
diff --git a/delegate/test/PackTestHelper.hpp b/delegate/test/PackTestHelper.hpp
index 0fd2f19..112eccb 100644
--- a/delegate/test/PackTestHelper.hpp
+++ b/delegate/test/PackTestHelper.hpp
@@ -8,17 +8,15 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
-#include <doctest/doctest.h>
+#include <schema_generated.h>
 
-#include <string>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -108,7 +106,7 @@
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -126,7 +124,7 @@
               float quantScale = 1.0f,
               int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreatePackTfLiteModel(packOperatorCode,
                                                           tensorType,
                                                           inputShape,
@@ -136,51 +134,35 @@
                                                           quantScale,
                                                           quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
 
     // Set input data for all input tensors.
     for (unsigned int i = 0; i < inputValues.size(); ++i)
     {
-        // Get single input tensor and assign to interpreters.
         auto inputTensorValues = inputValues[i];
-        armnnDelegate::FillInput<T>(tfLiteInterpreter, i, inputTensorValues);
-        armnnDelegate::FillInput<T>(armnnDelegateInterpreter, i, inputTensorValues);
+        CHECK(tfLiteInterpreter.FillInputTensor<T>(inputTensorValues, i) == kTfLiteOk);
+        CHECK(armnnInterpreter.FillInputTensor<T>(inputTensorValues, i) == kTfLiteOk);
     }
 
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Compare output data
-    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
-                                        armnnDelegateInterpreter,
-                                        expectedOutputShape,
-                                        expectedOutputValues);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    armnnDelegateInterpreter.reset(nullptr);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
+
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/PadTestHelper.hpp b/delegate/test/PadTestHelper.hpp
index d049c52..c4bfd89 100644
--- a/delegate/test/PadTestHelper.hpp
+++ b/delegate/test/PadTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -153,7 +153,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -174,7 +174,7 @@
              int quantOffset  = 0,
              tflite::MirrorPadMode paddingMode = tflite::MirrorPadMode_SYMMETRIC)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreatePadTfLiteModel<T>(padOperatorCode,
                                                             tensorType,
                                                             paddingMode,
@@ -186,39 +186,27 @@
                                                             quantScale,
                                                             quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    CHECK(tfLiteModel != nullptr);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
diff --git a/delegate/test/Pooling2dTestHelper.hpp b/delegate/test/Pooling2dTestHelper.hpp
index 6de85b6..d08a45b 100644
--- a/delegate/test/Pooling2dTestHelper.hpp
+++ b/delegate/test/Pooling2dTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -106,7 +106,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers, 3));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -129,7 +129,7 @@
                    float quantScale = 1.0f,
                    int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreatePooling2dTfLiteModel(poolingOperatorCode,
                                                                tensorType,
                                                                inputShape,
@@ -143,50 +143,27 @@
                                                                quantScale,
                                                                quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    CHECK(tfLiteModel != nullptr);
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Set input data
-    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelegateInputData[i] = inputValues[i];
-    }
-
-    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        armnnDelegateInputData[i] = inputValues[i];
-    }
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
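
The hand-rolled typed_tensor copy loops removed in this helper collapse into single FillInputTensor calls. A sketch of what that call is assumed to do through the TFLite C API (the actual logic lives with the new test interpreter utilities):

#include <tensorflow/lite/c/c_api.h>

#include <vector>

// Hypothetical equivalent of FillInputTensor<T>: look up the input tensor by
// index and copy the whole host vector into its backing buffer in one call.
template <typename T>
TfLiteStatus FillInput(TfLiteInterpreter* interpreter, std::vector<T>& values, int index)
{
    TfLiteTensor* inputTensor = TfLiteInterpreterGetInputTensor(interpreter, index);
    return TfLiteTensorCopyFromBuffer(inputTensor, values.data(), values.size() * sizeof(T));
}
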
diff --git a/delegate/test/Pooling3dTestHelper.hpp b/delegate/test/Pooling3dTestHelper.hpp
index dd90e4b..59d2e18 100644
--- a/delegate/test/Pooling3dTestHelper.hpp
+++ b/delegate/test/Pooling3dTestHelper.hpp
@@ -8,16 +8,16 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
 #include <flatbuffers/flexbuffers.h>
-#include <tensorflow/lite/interpreter.h>
-#include <tensorflow/lite/kernels/custom_ops_register.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
+#include <tensorflow/lite/kernels/custom_ops_register.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -131,7 +131,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -156,7 +156,7 @@
                    float quantScale = 1.0f,
                    int quantOffset = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     // Create the single op model buffer
     std::vector<char> modelBuffer = CreatePooling3dTfLiteModel(poolType,
                                                                tensorType,
@@ -173,79 +173,37 @@
                                                                quantScale,
                                                                quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    CHECK(tfLiteModel != nullptr);
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-
-    // Custom ops need to be added to the BuiltinOp resolver before the interpreter is created
-    // Based on the poolType from the test case add the custom operator using the name and the tflite
-    // registration function
-    tflite::ops::builtin::BuiltinOpResolver armnn_op_resolver;
+    std::string opType = "";
     if (poolType == "kMax")
     {
-        armnn_op_resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
+        opType = "MaxPool3D";
     }
     else
     {
-        armnn_op_resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
+        opType = "AveragePool3D";
     }
 
-    CHECK(InterpreterBuilder(tfLiteModel, armnn_op_resolver)
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer, opType);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends, opType);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Custom ops need to be added to the BuiltinOp resolver before the interpreter is created
-    // Based on the poolType from the test case add the custom operator using the name and the tflite
-    // registration function
-    tflite::ops::builtin::BuiltinOpResolver tflite_op_resolver;
-    if (poolType == "kMax")
-    {
-        tflite_op_resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
-    }
-    else
-    {
-        tflite_op_resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
-    }
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    CHECK(InterpreterBuilder(tfLiteModel, tflite_op_resolver)
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelegateInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelegateInputData[i] = inputValues[i];
-    }
-
-    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        armnnDelegateInputData[i] = inputValues[i];
-    }
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 // Function to create the flexbuffer custom options for the custom pooling3d operator.
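
Pooling3d previously built two identical custom-op resolvers by hand, one per interpreter. That duplication is replaced by mapping the pool type to an operator name once and passing it to the DelegateTestInterpreter constructors, which are assumed to perform the registration internally. A sketch of that registration, mirroring the resolvers the removed code built inline:

#include <tensorflow/lite/kernels/register.h>
#include <tensorflow/lite/kernels/custom_ops_register.h>

#include <string>

// Hypothetical resolver factory: registers the named 3D pooling custom op on
// top of the builtin ops, exactly as the removed per-interpreter code did.
tflite::ops::builtin::BuiltinOpResolver CreateOpResolver(const std::string& opType)
{
    tflite::ops::builtin::BuiltinOpResolver resolver;
    if (opType == "MaxPool3D")
    {
        resolver.AddCustom("MaxPool3D", tflite::ops::custom::Register_MAX_POOL_3D());
    }
    else if (opType == "AveragePool3D")
    {
        resolver.AddCustom("AveragePool3D", tflite::ops::custom::Register_AVG_POOL_3D());
    }
    return resolver;
}
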
diff --git a/delegate/test/PreluTest.cpp b/delegate/test/PreluTest.cpp
index 40bf1dd..f65e15b 100644
--- a/delegate/test/PreluTest.cpp
+++ b/delegate/test/PreluTest.cpp
@@ -18,7 +18,8 @@
 
 namespace armnnDelegate {
 
-void PreluFloatSimpleTest(std::vector <armnn::BackendId>& backends, bool isAlphaConst, bool isDynamicOutput = false) {
+void PreluFloatSimpleTest(std::vector <armnn::BackendId>& backends, bool isAlphaConst, bool isDynamicOutput = false)
+{
     std::vector<int32_t> inputShape { 1, 2, 3 };
     std::vector<int32_t> alphaShape { 1 };
     std::vector<int32_t> outputShape { 1, 2, 3 };
diff --git a/delegate/test/PreluTestHelper.hpp b/delegate/test/PreluTestHelper.hpp
index 0721c13..c2a9435 100644
--- a/delegate/test/PreluTestHelper.hpp
+++ b/delegate/test/PreluTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -107,7 +107,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -124,7 +124,7 @@
                std::vector<float>& expectedOutput,
                bool alphaIsConstant)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
 
     std::vector<char> modelBuffer = CreatePreluTfLiteModel(preluOperatorCode,
                                                            tensorType,
@@ -134,62 +134,42 @@
                                                            alphaData,
                                                            alphaIsConstant);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
 
-    CHECK(tfLiteModel != nullptr);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
 
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
 
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-          (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<float>(tfLiteInterpreter, 0, inputData);
-    armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 0, inputData);
+    CHECK(armnnInterpreter.FillInputTensor<float>(inputData, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<float>(inputData, 0) == kTfLiteOk);
 
     // Set alpha data if not constant
-    if (!alphaIsConstant) {
-        armnnDelegate::FillInput<float>(tfLiteInterpreter, 1, alphaData);
-        armnnDelegate::FillInput<float>(armnnDelegateInterpreter, 1, alphaData);
-    }
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-
-    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-
-    for (size_t i = 0; i < expectedOutput.size(); i++)
+    if (!alphaIsConstant)
     {
-        CHECK(expectedOutput[i] == armnnDelegateOutputData[i]);
-        CHECK(tfLiteDelegateOutputData[i] == expectedOutput[i]);
-        CHECK(tfLiteDelegateOutputData[i] == armnnDelegateOutputData[i]);
+        CHECK(tfLiteInterpreter.FillInputTensor<float>(alphaData, 1) == kTfLiteOk);
+        CHECK(armnnInterpreter.FillInputTensor<float>(alphaData, 1) == kTfLiteOk);
     }
+
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+
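+    // Compare output data from both runtimes against the expected values.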
+    armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutput);
+
+    // Don't compare shapes on dynamic output tests, as output shape gets cleared.
+    if (!outputShape.empty())
+    {
+        std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
+        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
+        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
+    }
+
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/QuantizationTestHelper.hpp b/delegate/test/QuantizationTestHelper.hpp
index af898f3..8554a01 100644
--- a/delegate/test/QuantizationTestHelper.hpp
+++ b/delegate/test/QuantizationTestHelper.hpp
@@ -5,15 +5,17 @@
 
 #pragma once
 
+#include "TestUtils.hpp"
+
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -112,7 +114,7 @@
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -130,7 +132,7 @@
                       float quantScale = 1.0f,
                       int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateQuantizationTfLiteModel(quantizeOperatorCode,
                                                                   inputTensorType,
                                                                   outputTensorType,
@@ -139,62 +141,27 @@
                                                                   quantScale,
                                                                   quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<OutputT> tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<OutputT>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<OutputT> armnnOutputValues = armnnInterpreter.GetOutputResult<OutputT>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
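+    // Compare output data and shape against the TFLite reference and the expected results.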
+    armnnDelegate::CompareOutputData<OutputT>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<InputT>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelageInputData[i] = inputValues[i];
-    }
-
-    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<InputT>(armnnDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        armnnDelegateInputData[i] = inputValues[i];
-    }
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<OutputT>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<OutputT>(armnnDelegateOutputId);
-
-    for (size_t i = 0; i < expectedOutputValues.size(); i++)
-    {
-        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
-        CHECK(tfLiteDelageOutputData[i] == expectedOutputValues[i]);
-        CHECK(tfLiteDelageOutputData[i] == armnnDelegateOutputData[i]);
-    }
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/RedefineTestHelper.hpp b/delegate/test/RedefineTestHelper.hpp
index ce60db0..80631cc 100644
--- a/delegate/test/RedefineTestHelper.hpp
+++ b/delegate/test/RedefineTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -135,7 +135,7 @@
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -154,7 +154,7 @@
                   float quantScale = 1.0f,
                   int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateRedefineTfLiteModel(redefineOperatorCode,
                                                               tensorType,
                                                               inputShape,
@@ -164,39 +164,27 @@
                                                               quantScale,
                                                               quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    CHECK(tfLiteModel != nullptr);
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
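+    // Compare output data and shape between the two runtimes and the expected values.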
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ReduceTestHelper.hpp b/delegate/test/ReduceTestHelper.hpp
index fedf7ee..a268981 100644
--- a/delegate/test/ReduceTestHelper.hpp
+++ b/delegate/test/ReduceTestHelper.hpp
@@ -8,17 +8,15 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
-#include <doctest/doctest.h>
+#include <schema_generated.h>
 
-#include <string>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -140,7 +138,7 @@
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers, 4));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -160,7 +158,7 @@
                 float quantScale = 1.0f,
                 int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBufferArmNN = CreateReduceTfLiteModel(reduceOperatorCode,
                                                                  tensorType,
                                                                  input0Shape,
@@ -182,47 +180,27 @@
                                                                   quantOffset,
                                                                   true);
 
-    const Model* tfLiteModelArmNN = GetModel(modelBufferArmNN.data());
-    const Model* tfLiteModelTFLite = GetModel(modelBufferTFLite.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBufferTFLite);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModelArmNN, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBufferArmNN, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(input0Values, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModelTFLite, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
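+    // Compare output data and shape from both runtimes against the expected results.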
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, input0Values);
-    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, input0Values);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
-                                        armnnDelegateInterpreter,
-                                        expectedOutputShape,
-                                        expectedOutputValues);
-
-    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/ResizeTest.cpp b/delegate/test/ResizeTest.cpp
index 2011387..f3bfe43 100644
--- a/delegate/test/ResizeTest.cpp
+++ b/delegate/test/ResizeTest.cpp
@@ -42,7 +42,7 @@
 
     const std::vector<int32_t> input1Shape { 1, 3, 3, 1 };
     const std::vector<int32_t> input2Shape { 2 };
-    const std::vector<int32_t> expectedOutputShape = input2NewShape;
+    const std::vector<int32_t> expectedOutputShape = { 1, 5, 5, 1 };
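+    // The expected output shape is the full 4D NHWC shape; input2NewShape holds only the new height and width.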
 
     ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_BILINEAR,
                        backends,
@@ -66,7 +66,7 @@
 
     const std::vector<int32_t> input1Shape { 1, 2, 2, 1 };
     const std::vector<int32_t> input2Shape { 2 };
-    const std::vector<int32_t> expectedOutputShape = input2NewShape;
+    const std::vector<int32_t> expectedOutputShape = { 1, 1, 1, 1 };
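+    // Likewise, use the full 4D NHWC shape rather than the two-element size tensor.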
 
     ResizeFP32TestImpl(tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR,
                        backends,
diff --git a/delegate/test/ResizeTestHelper.hpp b/delegate/test/ResizeTestHelper.hpp
index ab7de14..ff0c413 100644
--- a/delegate/test/ResizeTestHelper.hpp
+++ b/delegate/test/ResizeTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -113,7 +113,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -128,7 +128,7 @@
                         std::vector<float>& expectedOutputValues,
                         std::vector<int32_t> expectedOutputShape)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
 
     std::vector<char> modelBuffer = CreateResizeTfLiteModel(operatorCode,
                                                             ::tflite::TensorType_FLOAT32,
@@ -137,58 +137,29 @@
                                                             input2Shape,
                                                             expectedOutputShape);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<float>(input1Values, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(input2NewShape, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // The model will be executed using tflite and using the armnn delegate so that the outputs
-    // can be compared.
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<float>(input1Values, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<int32_t>(input2NewShape, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreter with armnn delegate
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
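+    // Compare output data and shape between the TFLite and Arm NN runs.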
+    armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
 
-    // Create TfLite Interpreter without armnn delegate
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data for the armnn interpreter
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 0, input1Values);
-    armnnDelegate::FillInput(armnnDelegateInterpreter, 1, input2NewShape);
-
-    // Set input data for the tflite interpreter
-    armnnDelegate::FillInput(tfLiteInterpreter, 0, input1Values);
-    armnnDelegate::FillInput(tfLiteInterpreter, 1, input2NewShape);
-
-    // Run EnqueWorkload
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelageOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-    for (size_t i = 0; i < expectedOutputValues.size(); i++)
-    {
-        CHECK(expectedOutputValues[i] == doctest::Approx(armnnDelegateOutputData[i]));
-        CHECK(armnnDelegateOutputData[i] == doctest::Approx(tfLiteDelageOutputData[i]));
-    }
-
-    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/RoundTestHelper.hpp b/delegate/test/RoundTestHelper.hpp
index dc14abf..3aa066b 100644
--- a/delegate/test/RoundTestHelper.hpp
+++ b/delegate/test/RoundTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -94,7 +94,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
@@ -109,55 +109,34 @@
                float quantScale = 1.0f,
                int quantOffset = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateRoundTfLiteModel(roundOperatorCode,
                                                            tensorType,
                                                            shape,
                                                            quantScale,
                                                            quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegate) == kTfLiteOk);
-    CHECK(armnnDelegate != nullptr);
-    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteDelegate) == kTfLiteOk);
-    CHECK(tfLiteDelegate != nullptr);
-    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
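+    // Compare output data and shape; Round is shape-preserving, so the input shape is the expected output shape.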
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
-                                        armnnDelegate,
-                                        shape,
-                                        expectedOutputValues,
-                                        0);
-
-    tfLiteDelegate.reset(nullptr);
-    armnnDelegate.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
diff --git a/delegate/test/ShapeTestHelper.hpp b/delegate/test/ShapeTestHelper.hpp
index 54e27ac..42f258b 100644
--- a/delegate/test/ShapeTestHelper.hpp
+++ b/delegate/test/ShapeTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -97,7 +97,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -114,7 +114,7 @@
                float quantScale = 1.0f,
                int quantOffset = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateShapeTfLiteModel(inputTensorType,
                                                            outputTensorType,
                                                            inputShape,
@@ -122,52 +122,25 @@
                                                            quantScale,
                                                            quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
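+    // No input data is filled; the Shape operator depends only on the input tensor's shape.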
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<K>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<K>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegate;
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<K>       armnnOutputValues = armnnInterpreter.GetOutputResult<K>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegate) == kTfLiteOk);
-    CHECK(armnnDelegate != nullptr);
-    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
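+    // Compare both runtimes' outputs against the expected values and shape.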
+    armnnDelegate::CompareOutputData<K>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
 
-    std::unique_ptr<Interpreter> tfLiteDelegate;
-
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteDelegate) == kTfLiteOk);
-    CHECK(tfLiteDelegate != nullptr);
-    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-
-    std::unique_ptr < TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete) >
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData<K>(tfLiteDelegate,
-                                        armnnDelegate,
-                                        expectedOutputShape,
-                                        expectedOutputValues,
-                                        0);
-
-    tfLiteDelegate.reset(nullptr);
-    armnnDelegate.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
diff --git a/delegate/test/SliceTestHelper.hpp b/delegate/test/SliceTestHelper.hpp
index c938fad..19f2b3d 100644
--- a/delegate/test/SliceTestHelper.hpp
+++ b/delegate/test/SliceTestHelper.hpp
@@ -8,18 +8,15 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
-#include <armnn/DescriptorsFwd.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
-#include <doctest/doctest.h>
+#include <schema_generated.h>
 
-#include <string>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -110,7 +107,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers, 5));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -127,7 +124,7 @@
                    std::vector<int32_t>& sizeTensorShape,
                    std::vector<int32_t>& outputTensorShape)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateSliceTfLiteModel(
         ::tflite::TensorType_FLOAT32,
         inputTensorShape,
@@ -137,47 +134,27 @@
         sizeTensorShape,
         outputTensorShape);
 
-    auto tfLiteModel = GetModel(modelBuffer.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-        (&armnnDelegate) == kTfLiteOk);
-    CHECK(armnnDelegate != nullptr);
-    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-        (&tfLiteDelegate) == kTfLiteOk);
-    CHECK(tfLiteDelegate != nullptr);
-    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
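+    // Compare output data and shape against the expected slice results.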
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
-                                        armnnDelegate,
-                                        outputTensorShape,
-                                        expectedOutputValues);
-
-    tfLiteDelegate.reset(nullptr);
-    armnnDelegate.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 } // End of Slice Test
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/SoftmaxTestHelper.hpp b/delegate/test/SoftmaxTestHelper.hpp
index 15177b7..ffd02ab 100644
--- a/delegate/test/SoftmaxTestHelper.hpp
+++ b/delegate/test/SoftmaxTestHelper.hpp
@@ -5,16 +5,18 @@
 
 #pragma once
 
+#include "TestUtils.hpp"
+
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 #include <armnnUtils/FloatingPointComparison.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -95,7 +97,7 @@
                     flatBufferBuilder.CreateVector(&subgraph, 1),
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
@@ -108,65 +110,33 @@
                  std::vector<float>& expectedOutputValues,
                  float beta = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateSoftmaxTfLiteModel(softmaxOperatorCode,
                                                              tensorType,
                                                              shape,
                                                              beta);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
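+    // Compare output data; Softmax preserves the input shape, so 'shape' is the expected output shape.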
+    armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, shape);
 
-    // Set input data
-    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
-    auto tfLiteInterpreterInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteInterpreterInputData[i] = inputValues[i];
-    }
-
-    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        armnnDelegateInputData[i] = inputValues[i];
-    }
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-
-    for (size_t i = 0; i < inputValues.size(); ++i)
-    {
-         CHECK(armnnUtils::within_percentage_tolerance(expectedOutputValues[i], armnnDelegateOutputData[i], 0.1));
-         CHECK(armnnUtils::within_percentage_tolerance(tfLiteInterpreterOutputData[i],
-                                                       armnnDelegateOutputData[i], 0.1));
-    }
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 
diff --git a/delegate/test/SpaceDepthTestHelper.hpp b/delegate/test/SpaceDepthTestHelper.hpp
index 6e8e39d..912472d 100644
--- a/delegate/test/SpaceDepthTestHelper.hpp
+++ b/delegate/test/SpaceDepthTestHelper.hpp
@@ -8,14 +8,14 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -108,7 +108,7 @@
                     flatBufferBuilder.CreateVector(&subgraph, 1),
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
@@ -123,46 +123,34 @@
                     std::vector<T>& expectedOutputValues,
                     int32_t blockSize = 2)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateSpaceDepthTfLiteModel(spaceDepthOperatorCode,
                                                                 tensorType,
                                                                 inputShape,
                                                                 outputShape,
                                                                 blockSize);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
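+    // Compare output data and shape between the TFLite and Arm NN interpreters.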
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter, outputShape, expectedOutputValues);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
diff --git a/delegate/test/SplitTestHelper.hpp b/delegate/test/SplitTestHelper.hpp
index 503fbc8..1d5f459 100644
--- a/delegate/test/SplitTestHelper.hpp
+++ b/delegate/test/SplitTestHelper.hpp
@@ -8,17 +8,15 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
-#include <doctest/doctest.h>
+#include <schema_generated.h>
 
-#include <string>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -113,7 +111,7 @@
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -132,7 +130,7 @@
                float quantScale = 1.0f,
                int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateSplitTfLiteModel(tensorType,
                                                            axisTensorShape,
                                                            inputTensorShape,
@@ -141,51 +139,34 @@
                                                            numSplits,
                                                            quantScale,
                                                            quantOffset);
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
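+    // Fill the value tensor at input index 1; input index 0 is the split axis tensor.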
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegate) == kTfLiteOk);
-    CHECK(armnnDelegate != nullptr);
-    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteDelegate) == kTfLiteOk);
-    CHECK(tfLiteDelegate != nullptr);
-    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteDelegate, 1, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegate, 1, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
 
     // Compare output data
     for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
     {
-        armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
-                                            armnnDelegate,
-                                            outputTensorShapes[i],
-                                            expectedOutputValues[i],
-                                            i);
+        std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
+        std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(i);
+
+        std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
+        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(i);
+
+        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
+        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShapes[i]);
     }
 
-    tfLiteDelegate.reset(nullptr);
-    armnnDelegate.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 } // End of SPLIT Test
 
 std::vector<char> CreateSplitVTfLiteModel(tflite::TensorType tensorType,
@@ -288,7 +269,7 @@
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -309,7 +290,7 @@
                 float quantScale = 1.0f,
                 int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateSplitVTfLiteModel(tensorType,
                                                             inputTensorShape,
                                                             splitsTensorShape,
@@ -320,51 +301,34 @@
                                                             numSplits,
                                                             quantScale,
                                                             quantOffset);
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegate) == kTfLiteOk);
-    CHECK(armnnDelegate != nullptr);
-    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
 
-    std::unique_ptr<Interpreter> tfLiteDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteDelegate) == kTfLiteOk);
-    CHECK(tfLiteDelegate != nullptr);
-    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
 
     // Compare output data
     for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
     {
-        armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
-                                            armnnDelegate,
-                                            outputTensorShapes[i],
-                                            expectedOutputValues[i],
-                                            i);
+        std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
+        std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(i);
+
+        std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
+        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(i);
+
+        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
+        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShapes[i]);
     }
 
-    tfLiteDelegate.reset(nullptr);
-    armnnDelegate.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 } // End of SPLIT_V Test
 
 } // anonymous namespace
\ No newline at end of file
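
Every test helper converted in this patch follows the same sequence: build the model buffer, run it on the stock TfLite runtime, run it again with the Arm NN delegate applied, then compare both results against the expected values. A condensed sketch of that pattern for a single-input, single-output model (the DelegateTestInterpreter signatures are inferred from the usage in these hunks, not taken from its header):

    // Sketch only: assumes modelBuffer holds a valid TfLite flatbuffer and
    // that DelegateTestInterpreter exposes the members used in this patch.
    template <typename T>
    void RunAndCompare(std::vector<armnn::BackendId>& backends,
                       std::vector<char>& modelBuffer,
                       std::vector<T>& inputValues,
                       std::vector<int32_t>& expectedOutputShape,
                       std::vector<T>& expectedOutputValues)
    {
        using namespace delegateTestInterpreter;

        // Reference run on the stock TfLite runtime.
        auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
        CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
        CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
        std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);

        // Same model with the Arm NN delegate applied.
        auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
        CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
        CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
        CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
        std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);

        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);

        tfLiteInterpreter.Cleanup();
        armnnInterpreter.Cleanup();
    }
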
diff --git a/delegate/test/StridedSliceTestHelper.hpp b/delegate/test/StridedSliceTestHelper.hpp
index fde7e16..d3d1601 100644
--- a/delegate/test/StridedSliceTestHelper.hpp
+++ b/delegate/test/StridedSliceTestHelper.hpp
@@ -8,18 +8,15 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
-#include <armnn/DescriptorsFwd.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
-#include <doctest/doctest.h>
+#include <schema_generated.h>
 
-#include <string>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -132,7 +129,7 @@
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers, 6));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -157,7 +154,7 @@
                           const int32_t ShrinkAxisMask = 0,
                           const armnn::DataLayout& dataLayout = armnn::DataLayout::NHWC)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateStridedSliceTfLiteModel(
             ::tflite::TensorType_FLOAT32,
             inputTensorShape,
@@ -175,47 +172,27 @@
             ShrinkAxisMask,
             dataLayout);
 
-    auto tfLiteModel = GetModel(modelBuffer.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegate) == kTfLiteOk);
-    CHECK(armnnDelegate != nullptr);
-    CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteDelegate;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteDelegate) == kTfLiteOk);
-    CHECK(tfLiteDelegate != nullptr);
-    CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputTensorShape);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegate->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteDelegate, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegate, 0, inputValues);
-
-    // Run EnqueWorkload
-    CHECK(tfLiteDelegate->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegate->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    armnnDelegate::CompareOutputData<T>(tfLiteDelegate,
-                                        armnnDelegate,
-                                        outputTensorShape,
-                                        expectedOutputValues);
-
-    tfLiteDelegate.reset(nullptr);
-    armnnDelegate.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 } // End of StridedSlice Test
 
 } // anonymous namespace
\ No newline at end of file
diff --git a/delegate/test/TestUtils.cpp b/delegate/test/TestUtils.cpp
index 2689c2e..0d53d94 100644
--- a/delegate/test/TestUtils.cpp
+++ b/delegate/test/TestUtils.cpp
@@ -17,7 +17,7 @@
     }
 }
 
-void CompareData(std::vector<bool>& tensor1, bool tensor2[], size_t tensorSize)
+void CompareData(std::vector<bool>& tensor1, std::vector<bool>& tensor2, size_t tensorSize)
 {
     auto compareBool = [](auto a, auto b) {return (((a == 0) && (b == 0)) || ((a != 0) && (b != 0)));};
     for (size_t i = 0; i < tensorSize; i++)
@@ -108,44 +108,18 @@
     }
 }
 
-template <>
-void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
-                       std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
-                       std::vector<int32_t>& expectedOutputShape,
-                       std::vector<Half>& expectedOutputValues,
-                       unsigned int outputIndex)
+void CompareOutputShape(const std::vector<int32_t>& tfLiteDelegateShape,
+                        const std::vector<int32_t>& armnnDelegateShape,
+                        const std::vector<int32_t>& expectedOutputShape)
 {
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
-    auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
-    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
-    auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<TfLiteFloat16>(armnnDelegateOutputId);
-
-        CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
-        CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
+    CHECK(expectedOutputShape.size() == tfLiteDelegateShape.size());
+    CHECK(expectedOutputShape.size() == armnnDelegateShape.size());
 
     for (size_t i = 0; i < expectedOutputShape.size(); i++)
     {
-        CHECK(armnnDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
-        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
-        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
-    }
-
-    armnnDelegate::CompareData(armnnDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelegateOutputData, expectedOutputValues.data(), expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelegateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
-}
-
-template <>
-void FillInput<Half>(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues)
-{
-    auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
-    auto tfLiteDelageInputData = interpreter->typed_tensor<TfLiteFloat16>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelageInputData[i].data = half_float::detail::float2half<std::round_indeterminate, float>(inputValues[i]);
-
+        CHECK(expectedOutputShape[i] == armnnDelegateShape[i]);
+        CHECK(tfLiteDelegateShape[i] == expectedOutputShape[i]);
+        CHECK(tfLiteDelegateShape[i] == armnnDelegateShape[i]);
     }
 }
 
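The std::vector<bool> overload above exists because vector<bool> is bit-packed, so there is no usable bool* to hand to the pointer overloads, and compareBool deliberately treats any non-zero value as true rather than requiring bit-identical values. A minimal usage sketch (hypothetical values; in the helpers the vectors would come from the interpreter outputs):

    std::vector<bool> tfLiteOutputs = { true, false, true };
    std::vector<bool> armnnOutputs  = { true, false, true };

    // Passes: every element pair agrees on zero/non-zero.
    armnnDelegate::CompareData(tfLiteOutputs, armnnOutputs, tfLiteOutputs.size());
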
diff --git a/delegate/test/TestUtils.hpp b/delegate/test/TestUtils.hpp
index 95dd257..ba81cd8 100644
--- a/delegate/test/TestUtils.hpp
+++ b/delegate/test/TestUtils.hpp
@@ -17,26 +17,12 @@
 namespace armnnDelegate
 {
 
-/// Can be used to assign input data from a vector to a model input.
-/// Example usage can be found in ResizeTesthelper.hpp
-template <typename T>
-void FillInput(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<T>& inputValues)
-{
-    auto tfLiteDelegateInputId = interpreter->inputs()[inputIndex];
-    auto tfLiteDelageInputData = interpreter->typed_tensor<T>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        tfLiteDelageInputData[i] = inputValues[i];
-    }
-}
-
-template <>
-void FillInput(std::unique_ptr<tflite::Interpreter>& interpreter, int inputIndex, std::vector<Half>& inputValues);
+constexpr const char* FILE_IDENTIFIER = "TFL3";
 
 /// Can be used to compare bool data coming from a tflite interpreter
 /// Boolean types get converted to a bit representation in a vector. vector.data() returns a void pointer
 /// instead of a pointer to bool. Therefore a special function to compare to vector of bool is required
-void CompareData(std::vector<bool>& tensor1, bool tensor2[], size_t tensorSize);
+void CompareData(std::vector<bool>& tensor1, std::vector<bool>& tensor2, size_t tensorSize);
 void CompareData(bool tensor1[], bool tensor2[], size_t tensorSize);
 
 /// Can be used to compare float data coming from a tflite interpreter with a tolerance of limit_of_float*100
@@ -66,36 +52,22 @@
 /// Can be used to compare Half (Float16) data and TfLiteFloat16 data coming from a tflite interpreter
 void CompareData(TfLiteFloat16 tensor1[], Half tensor2[], size_t tensorSize);
 
-/// Can be used to compare the output tensor shape and values
-/// from armnnDelegateInterpreter and tfLiteInterpreter.
+/// Can be used to compare the output tensor shapes from the TfLite and
+/// Arm NN delegate runs against the expected output shape.
+/// Example usage can be found in ControlTestHelper.hpp
+void CompareOutputShape(const std::vector<int32_t>& tfLiteDelegateShape,
+                        const std::vector<int32_t>& armnnDelegateShape,
+                        const std::vector<int32_t>& expectedOutputShape);
+
+/// Can be used to compare the output tensor values from the TfLite and
+/// Arm NN delegate runs against the expected output values.
 /// Example usage can be found in ControlTestHelper.hpp
 template <typename T>
-void CompareOutputData(std::unique_ptr<tflite::Interpreter>& tfLiteInterpreter,
-                       std::unique_ptr<tflite::Interpreter>& armnnDelegateInterpreter,
-                       std::vector<int32_t>& expectedOutputShape,
-                       std::vector<T>& expectedOutputValues,
-                       unsigned int outputIndex = 0)
+void CompareOutputData(std::vector<T>& tfLiteDelegateOutputs,
+                       std::vector<T>& armnnDelegateOutputs,
+                       std::vector<T>& expectedOutputValues)
 {
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
-    auto tfLiteDelegateOutputTensor = tfLiteInterpreter->tensor(tfLiteDelegateOutputId);
-    auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[outputIndex];
-    auto armnnDelegateOutputTensor = armnnDelegateInterpreter->tensor(armnnDelegateOutputId);
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateOutputId);
-
-    CHECK(expectedOutputShape.size() == tfLiteDelegateOutputTensor->dims->size);
-    CHECK(expectedOutputShape.size() == armnnDelegateOutputTensor->dims->size);
-
-    for (size_t i = 0; i < expectedOutputShape.size(); i++)
-    {
-        CHECK(expectedOutputShape[i] == armnnDelegateOutputTensor->dims->data[i]);
-        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == expectedOutputShape[i]);
-        CHECK(tfLiteDelegateOutputTensor->dims->data[i] == armnnDelegateOutputTensor->dims->data[i]);
-    }
-
-    armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData    , expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelegateOutputData   , expectedOutputValues.data(), expectedOutputValues.size());
-    armnnDelegate::CompareData(tfLiteDelegateOutputData   , armnnDelegateOutputData    , expectedOutputValues.size());
+    armnnDelegate::CompareData(expectedOutputValues.data(),  armnnDelegateOutputs.data(), expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputs.data(), expectedOutputValues.data(), expectedOutputValues.size());
+    armnnDelegate::CompareData(tfLiteDelegateOutputs.data(), armnnDelegateOutputs.data(), expectedOutputValues.size());
 }
 
 } // namespace armnnDelegate
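
FILE_IDENTIFIER is the standard TfLite flatbuffer tag: the four bytes "TFL3" sit immediately after the root offset, and the verification performed when a TfLiteModel is created from the buffer checks for them, which is why every CreateXXXTfLiteModel helper now passes the identifier to Finish(). A minimal sketch (tflite::CreateModel's field arguments are elided here; the helpers pass the full model contents):

    flatbuffers::FlatBufferBuilder flatBufferBuilder;
    auto flatbufferModel = tflite::CreateModel(flatBufferBuilder /*, ... */);

    // Writes the "TFL3" identifier into the buffer header.
    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);

    // The identifier can also be checked explicitly before handing the
    // buffer to the TfLite C API.
    CHECK(flatbuffers::BufferHasIdentifier(flatBufferBuilder.GetBufferPointer(),
                                           armnnDelegate::FILE_IDENTIFIER));
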
diff --git a/delegate/test/TransposeTest.cpp b/delegate/test/TransposeTest.cpp
index c210128..cb3b327 100644
--- a/delegate/test/TransposeTest.cpp
+++ b/delegate/test/TransposeTest.cpp
@@ -13,6 +13,28 @@
 namespace armnnDelegate
 {
 
+void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
+{
+    // Set test input data
+    std::vector<int32_t> input0Shape {4, 2, 3};
+    std::vector<int32_t> inputPermVecShape {3};
+    std::vector<int32_t> outputShape {3, 4, 2};
+
+    std::vector<float> input0Values = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11,
+                                       12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23};
+    std::vector<int32_t> inputPermVec = {2, 0, 1};
+    std::vector<float> expectedOutputValues = {0, 3, 6, 9, 12, 15, 18, 21, 1, 4, 7, 10,
+                                               13, 16, 19, 22, 2, 5, 8, 11, 14, 17, 20, 23};
+
+    TransposeTest<float>(backends,
+                         input0Shape,
+                         inputPermVecShape,
+                         outputShape,
+                         input0Values,
+                         inputPermVec,
+                         expectedOutputValues);
+}
+
 TEST_SUITE ("Transpose_GpuAccTests")
 {
 
@@ -37,10 +59,13 @@
 
 TEST_SUITE ("Transpose_CpuRefTests")
 {
+
 TEST_CASE ("Transpose_Float32_CpuRef_Test")
 {
-        std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
-        TransposeFP32Test(backends);
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuRef };
+    TransposeFP32Test(backends);
 }
+
 }
+
 } // namespace armnnDelegate
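
The expected output shape in TransposeFP32Test is the corrected one: for TRANSPOSE, output dimension i is input dimension perm[i], so a {4, 2, 3} input permuted by {2, 0, 1} yields {3, 4, 2}. The {2, 3, 4} value removed from the helper below was the result of applying the inverse permutation instead. A quick check:

    // Output dim i of TRANSPOSE is input dim perm[i].
    std::vector<int32_t> inputShape = { 4, 2, 3 };
    std::vector<int32_t> perm       = { 2, 0, 1 };
    std::vector<int32_t> outputShape(perm.size());
    for (size_t i = 0; i < perm.size(); ++i)
    {
        outputShape[i] = inputShape[static_cast<size_t>(perm[i])];
    }
    // outputShape == { 3, 4, 2 }
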
diff --git a/delegate/test/TransposeTestHelper.hpp b/delegate/test/TransposeTestHelper.hpp
index 99bb60b..57f4e29 100644
--- a/delegate/test/TransposeTestHelper.hpp
+++ b/delegate/test/TransposeTestHelper.hpp
@@ -5,15 +5,17 @@
 
 #pragma once
 
+#include "TestUtils.hpp"
+
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
+#include <schema_generated.h>
+
 #include <doctest/doctest.h>
 
 namespace
@@ -76,102 +78,51 @@
                         flatBufferBuilder.CreateVector(&subgraph, 1),
                         modelDescription,
                         flatBufferBuilder.CreateVector(buffers, 4));
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
 
-void TransposeFP32Test(std::vector<armnn::BackendId>& backends)
+template <typename T>
+void TransposeTest(std::vector<armnn::BackendId>& backends,
+                   std::vector<int32_t>& inputShape,
+                   std::vector<int32_t>& inputPermVecShape,
+                   std::vector<int32_t>& outputShape,
+                   std::vector<T>& inputValues,
+                   std::vector<int32_t>& inputPermVec,
+                   std::vector<T>& expectedOutputValues)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
 
-    // set test input data
-    std::vector<int32_t> input0Shape {4, 2, 3};
-    std::vector<int32_t> inputPermVecShape {3};
-    std::vector<int32_t> outputShape {2, 3, 4};
-
-    std::vector<float> input0Values = {0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11,
-                                       12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23};
-    std::vector<int32_t> inputPermVec = {2, 0, 1};
-    std::vector<float> expectedOutputValues = {0, 3, 6, 9, 12, 15, 18, 21, 1, 4, 7, 10,
-                                               13, 16, 19, 22, 2, 5, 8, 11, 14, 17, 20, 23};
-
-    // create model
+    // Create model
     std::vector<char> modelBuffer = CreateTransposeTfLiteModel(::tflite::TensorType_FLOAT32,
-                                                               input0Shape,
+                                                               inputShape,
                                                                inputPermVecShape,
                                                                outputShape,
                                                                inputPermVec);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<int32_t>(inputPermVec, 1) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<int32_t>(inputPermVec, 1) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                             armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+    armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
-    // Set input data for tflite
-    auto tfLiteInterpreterInput0Id = tfLiteInterpreter->inputs()[0];
-    auto tfLiteInterpreterInput0Data = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterInput0Id);
-    for (unsigned int i = 0; i < input0Values.size(); ++i)
-    {
-        tfLiteInterpreterInput0Data[i] = input0Values[i];
-    }
-
-    auto tfLiteInterpreterInput1Id = tfLiteInterpreter->inputs()[1];
-    auto tfLiteInterpreterInput1Data = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteInterpreterInput1Id);
-    for (unsigned int i = 0; i < inputPermVec.size(); ++i)
-    {
-        tfLiteInterpreterInput1Data[i] = inputPermVec[i];
-    }
-
-    //Set input data for armnn delegate
-    auto armnnDelegateInput0Id = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInput0Data = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInput0Id);
-    for (unsigned int i = 0; i < input0Values.size(); ++i)
-    {
-        armnnDelegateInput0Data[i] = input0Values[i];
-    }
-
-    auto armnnDelegateInput1Id = armnnDelegateInterpreter->inputs()[1];
-    auto armnnDelegateInput1Data = armnnDelegateInterpreter->typed_tensor<int32_t>(armnnDelegateInput1Id);
-    for (unsigned int i = 0; i < inputPermVec.size(); ++i)
-    {
-        armnnDelegateInput1Data[i] = inputPermVec[i];
-    }
-
-    // Run EnqueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-    // Compare output data
-    auto tfLiteInterpreterOutputId = tfLiteInterpreter->outputs()[0];
-    auto tfLiteInterpreterOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteInterpreterOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
-    for (size_t i = 0; i < expectedOutputValues.size(); ++i)
-    {
-        CHECK(expectedOutputValues[i] == armnnDelegateOutputData[i]);
-        CHECK(tfLiteInterpreterOutputData[i] == expectedOutputValues[i]);
-        CHECK(tfLiteInterpreterOutputData[i] == armnnDelegateOutputData[i]);
-    }
-
-    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 }
diff --git a/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp b/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp
index 0ff04e7..c058d83 100644
--- a/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp
+++ b/delegate/test/UnidirectionalSequenceLstmTestHelper.hpp
@@ -8,14 +8,13 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
-#include <tensorflow/lite/c/common.h>
+
+#include <schema_generated.h>
 
 #include <doctest/doctest.h>
 
@@ -569,7 +568,7 @@
                                                modelDescription,
                                                flatBufferBuilder.CreateVector(buffers));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -624,7 +623,7 @@
                                         bool isTimeMajor,
                                         float quantScale = 0.1f)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
 
     std::vector<char> modelBuffer = CreateUnidirectionalSequenceLstmTfLiteModel(tensorType,
                                                                                 batchSize,
@@ -671,72 +670,51 @@
                                                                                 isTimeMajor,
                                                                                 quantScale);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-                                   theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                                                    armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    auto tfLiteDelegateInputId = tfLiteInterpreter->inputs()[0];
-    auto tfLiteDelageInputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
+    std::vector<int32_t> outputShape;
+    if (isTimeMajor)
     {
-        tfLiteDelageInputData[i] = inputValues[i];
+        outputShape = {timeSize, batchSize, outputSize};
+    }
+    else
+    {
+        outputShape = {batchSize, timeSize, outputSize};
     }
 
-    auto armnnDelegateInputId   = armnnDelegateInterpreter->inputs()[0];
-    auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
-    for (unsigned int i = 0; i < inputValues.size(); ++i)
-    {
-        armnnDelegateInputData[i] = inputValues[i];
-    }
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(0);
 
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<float>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
+    std::vector<float>   armnnOutputValues = armnnInterpreter.GetOutputResult<float>(0);
+    std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(0);
 
-    // Compare output data
-    auto tfLiteDelegateOutputId   = tfLiteInterpreter->outputs()[0];
-    auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId    = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData  = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, outputShape);
 
     if (tensorType == ::tflite::TensorType_INT8)
     {
         // Allow 2% tolerance for Quantized weights
-        armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
+        armnnDelegate::CompareData(expectedOutputValues.data(), armnnOutputValues.data(),
                                    expectedOutputValues.size(), 2);
-        armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData,
+        armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteOutputValues.data(),
                                    expectedOutputValues.size(), 2);
-        armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData,
+        armnnDelegate::CompareData(tfLiteOutputValues.data(), armnnOutputValues.data(),
                                    expectedOutputValues.size(), 2);
     }
     else
     {
-        armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
-                                   expectedOutputValues.size());
-        armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData,
-                                   expectedOutputValues.size());
-        armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
+        armnnDelegate::CompareOutputData<float>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues);
     }
+
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
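
The outputShape branch added above encodes the layout contract of UNIDIRECTIONAL_SEQUENCE_LSTM: time-major models emit [timeSize, batchSize, outputSize], while batch-major models emit [batchSize, timeSize, outputSize]. An equivalent condensed form of the same branch (sketch only, using the parameters of the helper above):

    // Equivalent to the if/else branch above.
    std::vector<int32_t> outputShape = isTimeMajor
        ? std::vector<int32_t>{ timeSize, batchSize, outputSize }
        : std::vector<int32_t>{ batchSize, timeSize, outputSize };
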
diff --git a/delegate/test/UnpackTestHelper.hpp b/delegate/test/UnpackTestHelper.hpp
index a4c6bc0..2d6565f 100644
--- a/delegate/test/UnpackTestHelper.hpp
+++ b/delegate/test/UnpackTestHelper.hpp
@@ -8,17 +8,15 @@
 #include "TestUtils.hpp"
 
 #include <armnn_delegate.hpp>
+#include <DelegateTestInterpreter.hpp>
 
 #include <flatbuffers/flatbuffers.h>
-#include <tensorflow/lite/interpreter.h>
 #include <tensorflow/lite/kernels/register.h>
-#include <tensorflow/lite/model.h>
-#include <schema_generated.h>
 #include <tensorflow/lite/version.h>
 
-#include <doctest/doctest.h>
+#include <schema_generated.h>
 
-#include <string>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -110,7 +108,7 @@
                     modelDescription,
                     flatBufferBuilder.CreateVector(buffers));
 
-    flatBufferBuilder.Finish(flatbufferModel);
+    flatBufferBuilder.Finish(flatbufferModel, armnnDelegate::FILE_IDENTIFIER);
 
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -128,7 +126,7 @@
               float quantScale = 1.0f,
               int quantOffset  = 0)
 {
-    using namespace tflite;
+    using namespace delegateTestInterpreter;
     std::vector<char> modelBuffer = CreateUnpackTfLiteModel(unpackOperatorCode,
                                                             tensorType,
                                                             inputShape,
@@ -138,51 +136,33 @@
                                                             quantScale,
                                                             quantOffset);
 
-    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    // Set up interpreter with just TFLite Runtime.
+    auto tfLiteInterpreter = DelegateTestInterpreter(modelBuffer);
+    CHECK(tfLiteInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(tfLiteInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(tfLiteInterpreter.Invoke() == kTfLiteOk);
 
-    // Create TfLite Interpreters
-    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter != nullptr);
-    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-    std::unique_ptr<Interpreter> tfLiteInterpreter;
-    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
-    CHECK(tfLiteInterpreter != nullptr);
-    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-    // Create the ArmNN Delegate
-    armnnDelegate::DelegateOptions delegateOptions(backends);
-    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-                                    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                                                     armnnDelegate::TfLiteArmnnDelegateDelete);
-    CHECK(theArmnnDelegate != nullptr);
-
-    // Modify armnnDelegateInterpreter to use armnnDelegate
-    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-    // Set input data
-    armnnDelegate::FillInput<T>(tfLiteInterpreter, 0, inputValues);
-    armnnDelegate::FillInput<T>(armnnDelegateInterpreter, 0, inputValues);
-
-
-    // Run EnqueueWorkload
-    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+    // Set up interpreter with Arm NN Delegate applied.
+    auto armnnInterpreter = DelegateTestInterpreter(modelBuffer, backends);
+    CHECK(armnnInterpreter.AllocateTensors() == kTfLiteOk);
+    CHECK(armnnInterpreter.FillInputTensor<T>(inputValues, 0) == kTfLiteOk);
+    CHECK(armnnInterpreter.Invoke() == kTfLiteOk);
 
     // Compare output data
     for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
     {
-        armnnDelegate::CompareOutputData<T>(tfLiteInterpreter,
-                                            armnnDelegateInterpreter,
-                                            expectedOutputShape,
-                                            expectedOutputValues[i],
-                                            i);
+        std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
+        std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(i);
+
+        std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
+        std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(i);
+
+        armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
+        armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
     }
 
-    armnnDelegateInterpreter.reset(nullptr);
+    tfLiteInterpreter.Cleanup();
+    armnnInterpreter.Cleanup();
 }
 
 } // anonymous namespace
\ No newline at end of file
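
The SPLIT, SPLIT_V and UNPACK helpers all repeat the same per-output comparison loop. If that ever warrants factoring out, a hypothetical helper (not part of this patch) could look like the sketch below; it uses the UNPACK-style shared expected shape, so SPLIT, which checks a different shape per output, would need a vector of shapes instead:

    // Hypothetical refactor: compare every output of both interpreters
    // against per-output expected values and a shared expected shape.
    template <typename T>
    void CompareAllOutputs(delegateTestInterpreter::DelegateTestInterpreter& tfLiteInterpreter,
                           delegateTestInterpreter::DelegateTestInterpreter& armnnInterpreter,
                           std::vector<std::vector<T>>& expectedOutputValues,
                           std::vector<int32_t>& expectedOutputShape)
    {
        for (unsigned int i = 0; i < expectedOutputValues.size(); ++i)
        {
            std::vector<T>       tfLiteOutputValues = tfLiteInterpreter.GetOutputResult<T>(i);
            std::vector<int32_t> tfLiteOutputShape  = tfLiteInterpreter.GetOutputShape(i);

            std::vector<T>       armnnOutputValues = armnnInterpreter.GetOutputResult<T>(i);
            std::vector<int32_t> armnnOutputShape  = armnnInterpreter.GetOutputShape(i);

            armnnDelegate::CompareOutputData<T>(tfLiteOutputValues, armnnOutputValues, expectedOutputValues[i]);
            armnnDelegate::CompareOutputShape(tfLiteOutputShape, armnnOutputShape, expectedOutputShape);
        }
    }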