IVGCVSW-7526 Upgrade ArmNN to Tensorflow 2.12

 When creating a flatbuffers model, we need to provide an empty buffer 0, which
 is reserved by tensorflow. When creating the empty buffers for inputs and
 outputs, we cannot pass in an empty vector, or tflite will assume that we know
 in advance how many bytes to allocate. Instead we need to pass in only the
 builder.

 * Update libraries in FindTfLite.cmake
 * Add nullptr to delegate struct for OpaqueDelegateBuilder
 * Fix an issue in the unit tests where the Flatbuffers model was not being parsed by tflite
 * Tensorflow 2.12 now uses C++17 features. Update our CMake build to
   require a compiler that supports these features.
 * Change the minimum CMake version in Arm NN to 3.7, as that is the minimum
   required for the delegate build.

Signed-off-by: Ryan OShea <ryan.oshea3@arm.com>
Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Signed-off-by: Colm Donelan <colm.donelan@arm.com>

Change-Id: I7d15b196b8c59b1914f8fc1c4c2f8960630c069c
diff --git a/CMakeLists.txt b/CMakeLists.txt
index be6c4c6..2bdab04 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -3,8 +3,10 @@
 # Copyright © 2020 NXP
 # SPDX-License-Identifier: MIT
 #
-cmake_minimum_required (VERSION 3.0.2) # 3.0.2 required for return() statement used in AddDllCopyCommands.cmake
+cmake_minimum_required (VERSION 3.7.0)
 project(armnn)
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
 
 set(additional_cmake_files)
 list(APPEND additional_cmake_files
diff --git a/cmake/GlobalConfig.cmake b/cmake/GlobalConfig.cmake
index 0191b0d..1f24fa5 100644
--- a/cmake/GlobalConfig.cmake
+++ b/cmake/GlobalConfig.cmake
@@ -91,7 +91,7 @@
 # Compiler flags that are always set
 set(CMAKE_POSITION_INDEPENDENT_CODE ON)
 if(COMPILER_IS_GNU_LIKE)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -Wall -Wextra -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion")
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wall -Wextra -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion")
     if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
         set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}  -Wno-psabi")
     endif()
diff --git a/delegate/CMakeLists.txt b/delegate/CMakeLists.txt
index d044ed9..55bdb7c 100644
--- a/delegate/CMakeLists.txt
+++ b/delegate/CMakeLists.txt
@@ -5,8 +5,10 @@
 
 cmake_minimum_required (VERSION 3.7.0)
 project(armnnDelegate)
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
 
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -Wall -Wextra -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion -Wno-comment")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -Wall -Wextra -Werror -Wold-style-cast -Wno-missing-braces -Wconversion -Wsign-conversion -Wno-comment")
 
 set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${PROJECT_SOURCE_DIR}/cmake/Modules/")
 
diff --git a/delegate/cmake/Modules/FindTfLite.cmake b/delegate/cmake/Modules/FindTfLite.cmake
index 338cde1..634aaea 100644
--- a/delegate/cmake/Modules/FindTfLite.cmake
+++ b/delegate/cmake/Modules/FindTfLite.cmake
@@ -1,5 +1,5 @@
 #
-# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
@@ -38,8 +38,6 @@
                  PATH ${TFLITE_LIB_ROOT}/_deps/flatbuffers-build)
     find_library(TfLite_cpuinfo_LIB "libcpuinfo.a" PATH
                  ${TFLITE_LIB_ROOT}/_deps/cpuinfo-build)
-    find_library(TfLite_clog_LIB "libclog.a" PATH
-                 ${TFLITE_LIB_ROOT}/_deps/clog-build)
 
     # All remaining libraries are part of libruy.
     find_library(TfLite_ruy_allocator_LIB "libruy_allocator.a" PATH
@@ -100,6 +98,8 @@
                  ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy)
     find_library(TfLite_ruy_profiler_LIB "libruy_profiler_instrumentation.a" PATH
                 ${TFLITE_LIB_ROOT}/_deps/ruy-build/ruy/profiler)
+    find_library(TfLite_pthread_pool_LIB "libpthreadpool.a" PATH
+                ${TFLITE_LIB_ROOT}/pthreadpool)
 
     ## Set TFLITE_FOUND if all libraries are satisfied for static lib
     find_package_handle_standard_args(TfLite DEFAULT_MSG TfLite_LIB TfLite_abseilstrings_LIB TfLite_farmhash_LIB TfLite_fftsg_LIB TfLite_fftsg2d_LIB
@@ -110,8 +110,8 @@
                                       TfLite_ruy_kernel_avx2_fma_LIB TfLite_ruy_kernel_avx512_LIB TfLite_ruy_kernel_avx_LIB TfLite_ruy_pack_arm_LIB
                                       TfLite_ruy_pack_avx2_fma_LIB TfLite_ruy_pack_avx512_LIB TfLite_ruy_pack_avx_LIB TfLite_ruy_prepacked_cache_LIB
                                       TfLite_ruy_prepare_packed_matrices_LIB TfLite_ruy_system_aligned_alloc_LIB TfLite_ruy_threadpool_LIB
-                                      TfLite_ruy_trmul_LIB TfLite_ruy_tune_LIB TfLite_ruy_wait_LIB TfLite_ruy_profiler_LIB TfLite_cpuinfo_LIB TfLite_clog_LIB
-                                      TfLite_abseil_synchronization_LIB)
+                                      TfLite_ruy_trmul_LIB TfLite_ruy_tune_LIB TfLite_ruy_wait_LIB TfLite_ruy_profiler_LIB TfLite_cpuinfo_LIB
+                                      TfLite_abseil_synchronization_LIB TfLite_pthread_pool_LIB)
     # Set external variables for usage in CMakeLists.txt
     if (TFLITE_FOUND)
         # WARNING! The order of these libraries is critical. Moving them
@@ -126,7 +126,7 @@
                                      ${TfLite_ruy_pack_avx2_fma_LIB} ${TfLite_ruy_pack_avx512_LIB} ${TfLite_ruy_pack_avx_LIB} ${TfLite_ruy_prepacked_cache_LIB}
                                      ${TfLite_ruy_prepare_packed_matrices_LIB} ${TfLite_ruy_system_aligned_alloc_LIB}
                                      ${TfLite_ruy_tune_LIB} ${TfLite_ruy_wait_LIB} ${TfLite_ruy_profiler_LIB}
-                                     ${TfLite_cpuinfo_LIB} ${TfLite_clog_LIB} ${TfLite_abseil_synchronization_LIB})
+                                     ${TfLite_cpuinfo_LIB} ${TfLite_abseil_synchronization_LIB} ${TfLite_pthread_pool_LIB})
     endif ()
 elseif (TfLite_LIB MATCHES .so$)
     message("-- Dynamic tensorflow lite library found, using for ArmNN build")
diff --git a/delegate/include/armnn_delegate.hpp b/delegate/include/armnn_delegate.hpp
index 159d590..9cfc397 100644
--- a/delegate/include/armnn_delegate.hpp
+++ b/delegate/include/armnn_delegate.hpp
@@ -81,6 +81,7 @@
         nullptr,                        // .CopyToBufferHandle
         nullptr,                        // .FreeBufferHandle
         kTfLiteDelegateFlagsNone,       // .flags
+        nullptr,                        // .opaque_delegate_builder
     };
 
     /// ArmNN Runtime pointer
diff --git a/delegate/src/test/ActivationTestHelper.hpp b/delegate/src/test/ActivationTestHelper.hpp
index 0f4d944..6475083 100644
--- a/delegate/src/test/ActivationTestHelper.hpp
+++ b/delegate/src/test/ActivationTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -29,7 +29,7 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+    buffers[0] = CreateBuffer(flatBufferBuilder);
 
     std::array<flatbuffers::Offset<Tensor>, 2> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
diff --git a/delegate/src/test/ArgMinMaxTestHelper.hpp b/delegate/src/test/ArgMinMaxTestHelper.hpp
index a734c81..3e607d6 100644
--- a/delegate/src/test/ArgMinMaxTestHelper.hpp
+++ b/delegate/src/test/ArgMinMaxTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -46,7 +46,7 @@
                                     flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                             inputTensorShape.size()),
                                     tensorType,
-                                    0,
+                                    1,
                                     flatBufferBuilder.CreateString("input"),
                                     quantizationParameters);
 
@@ -54,26 +54,27 @@
                                    flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
                                                                            axisTensorShape.size()),
                                    tflite::TensorType_INT32,
-                                   1,
+                                   2,
                                    flatBufferBuilder.CreateString("axis"));
 
     auto outputTensor = CreateTensor(flatBufferBuilder,
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                              outputTensorShape.size()),
                                      outputType,
-                                     2,
+                                     3,
                                      flatBufferBuilder.CreateString("output"),
                                      quantizationParameters);
 
     std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, axisTensor, outputTensor };
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
                      flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisValue.data()),
                                                     sizeof(OutputT))));
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     std::vector<int32_t> operatorInputs = {{ 0, 1 }};
     std::vector<int> subgraphInputs = {{ 0, 1 }};
diff --git a/delegate/src/test/BatchMatMulTestHelper.hpp b/delegate/src/test/BatchMatMulTestHelper.hpp
index 42c1ed6..7437064 100644
--- a/delegate/src/test/BatchMatMulTestHelper.hpp
+++ b/delegate/src/test/BatchMatMulTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -20,184 +20,186 @@
 
 namespace
 {
+std::vector<char> CreateBatchMatMulTfLiteModel(
+        tflite::BuiltinOperator bmmOperatorCode,
+        tflite::TensorType tensorType,
+        const std::vector <int32_t>& LHSInputTensorShape,
+        const std::vector <int32_t>& RHSInputTensorShape,
+        const std::vector <int32_t>& outputTensorShape,
+        bool adjX = false,
+        bool adjY = false,
+        float quantScale = 1.0f,
+        int quantOffset  = 0)
+{
+    using namespace tflite;
+    flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
-    std::vector<char> CreateBatchMatMulTfLiteModel(
-            tflite::BuiltinOperator bmmOperatorCode,
-            tflite::TensorType tensorType,
-            const std::vector <int32_t>& LHSInputTensorShape,
-            const std::vector <int32_t>& RHSInputTensorShape,
-            const std::vector <int32_t>& outputTensorShape,
-            bool adjX = false,
-            bool adjY = false,
-            float quantScale = 1.0f,
-            int quantOffset  = 0)
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+
+    auto quantizationParameters =
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+
+    std::array<flatbuffers::Offset<Tensor>, 3> tensors;
+    tensors[0] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(LHSInputTensorShape.data(),
+                                                                      LHSInputTensorShape.size()),
+                              tensorType,
+                              1,
+                              flatBufferBuilder.CreateString("LHSInput"),
+                              quantizationParameters);
+
+    tensors[1] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(RHSInputTensorShape.data(),
+                                                                      RHSInputTensorShape.size()),
+                              tensorType,
+                              2,
+                              flatBufferBuilder.CreateString("RHSInput"),
+                              quantizationParameters);
+
+    tensors[2] = CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                      outputTensorShape.size()),
+                              tensorType,
+                              3,
+                              flatBufferBuilder.CreateString("output"),
+                              quantizationParameters);
+
+    // create operator
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_BatchMatMulOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions = CreateBatchMatMulOptions(flatBufferBuilder,
+                                                                                adjX,
+                                                                                adjY).Union();
+
+    const std::vector<int32_t> operatorInputs{{0, 1}};
+    const std::vector<int32_t> operatorOutputs{2};
+    flatbuffers::Offset <Operator> bmmOperator =
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+                                                                   operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
+
+    const std::vector<int> subgraphInputs{{0, 1}};
+    const std::vector<int> subgraphOutputs{2};
+    flatbuffers::Offset <SubGraph> subgraph =
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
+                                                                   subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&bmmOperator, 1));
+
+    flatbuffers::Offset <flatbuffers::String> modelDescription =
+            flatBufferBuilder.CreateString("ArmnnDelegate: BatchMatMul Operator Model");
+    flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, bmmOperatorCode);
+
+    flatbuffers::Offset <Model> flatbufferModel =
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+
+    flatBufferBuilder.Finish(flatbufferModel);
+
+    return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
+                             flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+}
+
+template <typename T>
+void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode,
+                   tflite::TensorType tensorType,
+                   std::vector<armnn::BackendId>& backends,
+                   std::vector<int32_t>& LHSInputShape,
+                   std::vector<int32_t>& RHSInputShape,
+                   std::vector<int32_t>& outputShape,
+                   std::vector<T>& LHSInputValues,
+                   std::vector<T>& RHSInputValues,
+                   std::vector<T>& expectedOutputValues,
+                   bool adjX = false,
+                   bool adjY = false,
+                   float quantScale = 1.0f,
+                   int quantOffset  = 0)
+{
+    using namespace tflite;
+    std::vector<char> modelBuffer = CreateBatchMatMulTfLiteModel(bmmOperatorCode,
+                                                                 tensorType,
+                                                                 LHSInputShape,
+                                                                 RHSInputShape,
+                                                                 outputShape,
+                                                                 adjX,
+                                                                 adjY,
+                                                                 quantScale,
+                                                                 quantOffset);
+
+    const Model* tfLiteModel = GetModel(modelBuffer.data());
+    CHECK(tfLiteModel != nullptr);
+    // Create TfLite Interpreters
+    std::unique_ptr<Interpreter> armnnDelegateInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter != nullptr);
+    CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
+
+    std::unique_ptr<Interpreter> tfLiteInterpreter;
+    CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
+                  (&tfLiteInterpreter) == kTfLiteOk);
+    CHECK(tfLiteInterpreter != nullptr);
+    CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
+
+    // Create the ArmNN Delegate
+    armnnDelegate::DelegateOptions delegateOptions(backends);
+    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
+    CHECK(theArmnnDelegate != nullptr);
+    // Modify armnnDelegateInterpreter to use armnnDelegate
+    CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
+
+    // Set input data
+    auto tfLiteDelegateLHSInputId = tfLiteInterpreter->inputs()[0];
+    auto tfLiteDelegateLHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateLHSInputId);
+    auto tfLiteDelegateRHSInputId = tfLiteInterpreter->inputs()[1];
+    auto tfLiteDelegateRHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateRHSInputId);
+    for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
     {
-        using namespace tflite;
-        flatbuffers::FlatBufferBuilder flatBufferBuilder;
-
-        std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-        buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
-
-        auto quantizationParameters =
-                CreateQuantizationParameters(flatBufferBuilder,
-                                             0,
-                                             0,
-                                             flatBufferBuilder.CreateVector<float>({ quantScale }),
-                                             flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
-
-        std::array<flatbuffers::Offset<Tensor>, 3> tensors;
-        tensors[0] = CreateTensor(flatBufferBuilder,
-                                  flatBufferBuilder.CreateVector<int32_t>(LHSInputTensorShape.data(),
-                                                                          LHSInputTensorShape.size()),
-                                  tensorType,
-                                  0,
-                                  flatBufferBuilder.CreateString("LHSInput"),
-                                  quantizationParameters);
-
-        tensors[1] = CreateTensor(flatBufferBuilder,
-                                  flatBufferBuilder.CreateVector<int32_t>(RHSInputTensorShape.data(),
-                                                                          RHSInputTensorShape.size()),
-                                  tensorType,
-                                  0,
-                                  flatBufferBuilder.CreateString("RHSInput"),
-                                  quantizationParameters);
-
-        tensors[2] = CreateTensor(flatBufferBuilder,
-                                  flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
-                                                                          outputTensorShape.size()),
-                                  tensorType,
-                                  0,
-                                  flatBufferBuilder.CreateString("output"),
-                                  quantizationParameters);
-
-        // create operator
-        tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_BatchMatMulOptions;
-        flatbuffers::Offset<void> operatorBuiltinOptions = CreateBatchMatMulOptions(flatBufferBuilder,
-                                                                                    adjX,
-                                                                                    adjY).Union();
-
-        const std::vector<int32_t> operatorInputs{{0, 1}};
-        const std::vector<int32_t> operatorOutputs{2};
-        flatbuffers::Offset <Operator> bmmOperator =
-                CreateOperator(flatBufferBuilder,
-                               0,
-                               flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
-                               flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
-                                                                       operatorOutputs.size()),
-                               operatorBuiltinOptionsType,
-                               operatorBuiltinOptions);
-
-        const std::vector<int> subgraphInputs{{0, 1}};
-        const std::vector<int> subgraphOutputs{2};
-        flatbuffers::Offset <SubGraph> subgraph =
-                CreateSubGraph(flatBufferBuilder,
-                               flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
-                               flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
-                               flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(),
-                                                                       subgraphOutputs.size()),
-                               flatBufferBuilder.CreateVector(&bmmOperator, 1));
-
-        flatbuffers::Offset <flatbuffers::String> modelDescription =
-                flatBufferBuilder.CreateString("ArmnnDelegate: BatchMatMul Operator Model");
-        flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, bmmOperatorCode);
-
-        flatbuffers::Offset <Model> flatbufferModel =
-                CreateModel(flatBufferBuilder,
-                            TFLITE_SCHEMA_VERSION,
-                            flatBufferBuilder.CreateVector(&operatorCode, 1),
-                            flatBufferBuilder.CreateVector(&subgraph, 1),
-                            modelDescription,
-                            flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
-
-        flatBufferBuilder.Finish(flatbufferModel);
-
-        return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
-                                 flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
+        tfLiteDelegateLHSInputData[i] = LHSInputValues[i];
+    }
+    for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
+    {
+        tfLiteDelegateRHSInputData[i] = RHSInputValues[i];
     }
 
-    template <typename T>
-    void BatchMatMulTest(tflite::BuiltinOperator bmmOperatorCode,
-                       tflite::TensorType tensorType,
-                       std::vector<armnn::BackendId>& backends,
-                       std::vector<int32_t>& LHSInputShape,
-                       std::vector<int32_t>& RHSInputShape,
-                       std::vector<int32_t>& outputShape,
-                       std::vector<T>& LHSInputValues,
-                       std::vector<T>& RHSInputValues,
-                       std::vector<T>& expectedOutputValues,
-                       bool adjX = false,
-                       bool adjY = false,
-                       float quantScale = 1.0f,
-                       int quantOffset  = 0)
+    auto armnnDelegateLHSInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateLHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateLHSInputId);
+    auto armnnDelegateRHSInputId = armnnDelegateInterpreter->inputs()[1];
+    auto armnnDelegateRHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateRHSInputId);
+    for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
     {
-        using namespace tflite;
-        std::vector<char> modelBuffer = CreateBatchMatMulTfLiteModel(bmmOperatorCode,
-                                                                     tensorType,
-                                                                     LHSInputShape,
-                                                                     RHSInputShape,
-                                                                     outputShape,
-                                                                     adjX,
-                                                                     adjY,
-                                                                     quantScale,
-                                                                     quantOffset);
-
-        const Model* tfLiteModel = GetModel(modelBuffer.data());
-        CHECK(tfLiteModel != nullptr);
-        // Create TfLite Interpreters
-        std::unique_ptr<Interpreter> armnnDelegateInterpreter;
-        CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                      (&armnnDelegateInterpreter) == kTfLiteOk);
-        CHECK(armnnDelegateInterpreter != nullptr);
-        CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
-
-        std::unique_ptr<Interpreter> tfLiteInterpreter;
-        CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                      (&tfLiteInterpreter) == kTfLiteOk);
-        CHECK(tfLiteInterpreter != nullptr);
-        CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
-
-        // Create the ArmNN Delegate
-        armnnDelegate::DelegateOptions delegateOptions(backends);
-        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-                theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                                 armnnDelegate::TfLiteArmnnDelegateDelete);
-        CHECK(theArmnnDelegate != nullptr);
-        // Modify armnnDelegateInterpreter to use armnnDelegate
-        CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
-
-        // Set input data
-        auto tfLiteDelegateLHSInputId = tfLiteInterpreter->inputs()[0];
-        auto tfLiteDelegateLHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateLHSInputId);
-        auto tfLiteDelegateRHSInputId = tfLiteInterpreter->inputs()[1];
-        auto tfLiteDelegateRHSInputData = tfLiteInterpreter->typed_tensor<T>(tfLiteDelegateRHSInputId);
-        for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
-        {
-            tfLiteDelegateLHSInputData[i] = LHSInputValues[i];
-        }
-        for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
-        {
-            tfLiteDelegateRHSInputData[i] = RHSInputValues[i];
-        }
-
-        auto armnnDelegateLHSInputId = armnnDelegateInterpreter->inputs()[0];
-        auto armnnDelegateLHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateLHSInputId);
-        auto armnnDelegateRHSInputId = armnnDelegateInterpreter->inputs()[1];
-        auto armnnDelegateRHSInputData = armnnDelegateInterpreter->typed_tensor<T>(armnnDelegateRHSInputId);
-        for (unsigned int i = 0; i < LHSInputValues.size(); ++i)
-        {
-            armnnDelegateLHSInputData[i] = LHSInputValues[i];
-        }
-        for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
-        {
-            armnnDelegateRHSInputData[i] = RHSInputValues[i];
-        }
-        // Run EnqueueWorkload
-        CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
-        CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
-
-        armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter,
-                                         outputShape, expectedOutputValues);
+        armnnDelegateLHSInputData[i] = LHSInputValues[i];
     }
+    for (unsigned int i = 0; i < RHSInputValues.size(); ++i)
+    {
+        armnnDelegateRHSInputData[i] = RHSInputValues[i];
+    }
+    // Run EnqueueWorkload
+    CHECK(tfLiteInterpreter->Invoke() == kTfLiteOk);
+    CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
+
+    armnnDelegate::CompareOutputData(tfLiteInterpreter, armnnDelegateInterpreter,
+                                     outputShape, expectedOutputValues);
+}
 
 } // anonymous namespace
 
diff --git a/delegate/src/test/BatchSpaceTestHelper.hpp b/delegate/src/test/BatchSpaceTestHelper.hpp
index 464a5d9..d4fa983 100644
--- a/delegate/src/test/BatchSpaceTestHelper.hpp
+++ b/delegate/src/test/BatchSpaceTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,14 +33,16 @@
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
-    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
-    buffers[1] = CreateBuffer(flatBufferBuilder,
+    std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder);
+    buffers[1] = CreateBuffer(flatBufferBuilder);
+    buffers[2] = CreateBuffer(flatBufferBuilder,
                               flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(blockData.data()),
                                                                   sizeof(int32_t) * blockData.size()));
-    buffers[2] = CreateBuffer(flatBufferBuilder,
+    buffers[3] = CreateBuffer(flatBufferBuilder,
                               flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cropsPadData.data()),
                                                                   sizeof(int64_t) * cropsPadData.size()));
+    buffers[4] = CreateBuffer(flatBufferBuilder);
 
     auto quantizationParameters =
             CreateQuantizationParameters(flatBufferBuilder,
@@ -60,7 +62,7 @@
                               flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                       inputTensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"),
                               quantizationParameters);
 
@@ -68,7 +70,7 @@
                               flatBufferBuilder.CreateVector<int32_t>(blockShape.data(),
                                                                       blockShape.size()),
                               ::tflite::TensorType_INT32,
-                              1,
+                              2,
                               flatBufferBuilder.CreateString("block"),
                               quantizationParameters);
 
@@ -76,7 +78,7 @@
                               flatBufferBuilder.CreateVector<int32_t>(cropsOrPaddingShape.data(),
                                                                       cropsOrPaddingShape.size()),
                               ::tflite::TensorType_INT32,
-                              2,
+                              3,
                               flatBufferBuilder.CreateString(cropsOrPadding),
                               quantizationParameters);
 
@@ -85,7 +87,7 @@
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               tensorType,
-                              0,
+                              4,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
diff --git a/delegate/src/test/CastTestHelper.hpp b/delegate/src/test/CastTestHelper.hpp
index 6b1d5ee..0448e65 100644
--- a/delegate/src/test/CastTestHelper.hpp
+++ b/delegate/src/test/CastTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -30,7 +30,9 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -44,14 +46,14 @@
                               flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                       tensorShape.size()),
                               inputTensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                       tensorShape.size()),
                               outputTensorType,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
diff --git a/delegate/src/test/ComparisonTestHelper.hpp b/delegate/src/test/ComparisonTestHelper.hpp
index c9ccb77..db337f9 100644
--- a/delegate/src/test/ComparisonTestHelper.hpp
+++ b/delegate/src/test/ComparisonTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,7 +33,10 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -47,21 +50,21 @@
                               flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
                                                                       input0TensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input_0"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
                                                                       input1TensorShape.size()),
                               tensorType,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("input_1"),
                               quantizationParameters);
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               ::tflite::TensorType_BOOL,
-                              0);
+                              3);
 
     // create operator
     tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_EqualOptions;;
diff --git a/delegate/src/test/ControlTestHelper.hpp b/delegate/src/test/ControlTestHelper.hpp
index 0c97961..3e427e6 100644
--- a/delegate/src/test/ControlTestHelper.hpp
+++ b/delegate/src/test/ControlTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -36,7 +36,9 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
             CreateQuantizationParameters(flatBufferBuilder,
@@ -57,7 +59,7 @@
                                   flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                           inputTensorShape.size()),
                                   tensorType,
-                                  0,
+                                  1,
                                   flatBufferBuilder.CreateString("input" + std::to_string(i)),
                                   quantizationParameters);
 
@@ -71,7 +73,7 @@
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               tensorType,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
@@ -126,7 +128,7 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+    buffers[0] = CreateBuffer(flatBufferBuilder);
     buffers[1] = CreateBuffer(flatBufferBuilder,
                               flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
                                                              sizeof(int32_t) * axisData.size()));
diff --git a/delegate/src/test/ConvolutionTestHelper.hpp b/delegate/src/test/ConvolutionTestHelper.hpp
index ce1f951..70c1da6 100644
--- a/delegate/src/test/ConvolutionTestHelper.hpp
+++ b/delegate/src/test/ConvolutionTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -50,15 +50,17 @@
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
-    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
-    buffers[1] = CreateBuffer(flatBufferBuilder,
+    std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder);
+    buffers[1] = CreateBuffer(flatBufferBuilder);
+    buffers[2] = CreateBuffer(flatBufferBuilder,
                               flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                              sizeof(T) * filterData.size()));
 
-    buffers[2] = CreateBuffer(flatBufferBuilder,
+    buffers[3] = CreateBuffer(flatBufferBuilder,
                               flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
                                                              sizeof(B) * biasData.size()));
+    buffers[4] = CreateBuffer(flatBufferBuilder);
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -95,14 +97,14 @@
                               flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                       inputTensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(filterTensorShape.data(),
                                                                       filterTensorShape.size()),
                               tensorType,
-                              1,
+                              2,
                               flatBufferBuilder.CreateString("filter"),
                               filterQuantizationParameters);
 
@@ -114,14 +116,14 @@
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(), biasTensorShape.size()),
                               biasTensorType,
-                              2,
+                              3,
                               flatBufferBuilder.CreateString("bias"),
                               biasQuantizationParameters);
     tensors[3] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               tensorType,
-                              0,
+                              4,
                               flatBufferBuilder.CreateString("output"),
                               outputQuantizationParameters);
 
@@ -334,7 +336,7 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+    buffers[0] = CreateBuffer(flatBufferBuilder);
     buffers[1] = CreateBuffer(flatBufferBuilder,
                               flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(filterData.data()),
                                                              sizeof(T) * filterData.size()));
@@ -581,7 +583,7 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+    buffers[0] = CreateBuffer(flatBufferBuilder);
     buffers[1] = CreateBuffer(flatBufferBuilder,
                               flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(transposeData.data()),
                                                              sizeof(int32_t) * transposeData.size()));
diff --git a/delegate/src/test/DelegateOptionsTestHelper.hpp b/delegate/src/test/DelegateOptionsTestHelper.hpp
index 7e147de..00a3d95 100644
--- a/delegate/src/test/DelegateOptionsTestHelper.hpp
+++ b/delegate/src/test/DelegateOptionsTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -44,7 +44,12 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -59,35 +64,35 @@
                               flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                       tensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input_0"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                       tensorShape.size()),
                               tensorType,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("input_1"),
                               quantizationParameters);
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                       tensorShape.size()),
                               tensorType,
-                              0,
+                              3,
                               flatBufferBuilder.CreateString("input_2"),
                               quantizationParameters);
     tensors[3] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                       tensorShape.size()),
                               tensorType,
-                              0,
+                              4,
                               flatBufferBuilder.CreateString("add"),
                               quantizationParameters);
     tensors[4] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                       tensorShape.size()),
                               tensorType,
-                              0,
+                              5,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
@@ -157,7 +162,7 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
diff --git a/delegate/src/test/ElementwiseBinaryTestHelper.hpp b/delegate/src/test/ElementwiseBinaryTestHelper.hpp
index 69b0c88..09a715e 100644
--- a/delegate/src/test/ElementwiseBinaryTestHelper.hpp
+++ b/delegate/src/test/ElementwiseBinaryTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -37,7 +37,8 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     if (constantInput)
     {
         buffers.push_back(
@@ -47,9 +48,9 @@
     }
     else
     {
-        buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
     }
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -64,21 +65,21 @@
                               flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
                                                                       input0TensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input_0"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
                                                                       input1TensorShape.size()),
                               tensorType,
-                              1,
+                              2,
                               flatBufferBuilder.CreateString("input_1"),
                               quantizationParameters);
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               tensorType,
-                              2,
+                              3,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
diff --git a/delegate/src/test/ElementwiseUnaryTestHelper.hpp b/delegate/src/test/ElementwiseUnaryTestHelper.hpp
index dcc7074..230d0fc 100644
--- a/delegate/src/test/ElementwiseUnaryTestHelper.hpp
+++ b/delegate/src/test/ElementwiseUnaryTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -29,7 +29,7 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::array<flatbuffers::Offset<tflite::Buffer>, 1> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+    buffers[0] = CreateBuffer(flatBufferBuilder);
 
     std::array<flatbuffers::Offset<Tensor>, 2> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
diff --git a/delegate/src/test/FillTestHelper.hpp b/delegate/src/test/FillTestHelper.hpp
index e6890a2..8479b72 100644
--- a/delegate/src/test/FillTestHelper.hpp
+++ b/delegate/src/test/FillTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -32,9 +32,7 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(
-        CreateBuffer(flatBufferBuilder,
-                     flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
                      flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(tensorShape.data()),
@@ -43,6 +41,7 @@
         CreateBuffer(flatBufferBuilder,
                      flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(fillValue.data()),
                                                     sizeof(T) * fillValue.size())));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     std::array<flatbuffers::Offset<Tensor>, 3> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
@@ -64,7 +63,7 @@
                               flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                       tensorShape.size()),
                               tensorType,
-                              0,
+                              3,
                               flatBufferBuilder.CreateString("output"));
 
     tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_FillOptions;
diff --git a/delegate/src/test/FullyConnectedTestHelper.hpp b/delegate/src/test/FullyConnectedTestHelper.hpp
index 37062c3..a3f009a 100644
--- a/delegate/src/test/FullyConnectedTestHelper.hpp
+++ b/delegate/src/test/FullyConnectedTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -37,8 +37,9 @@
 {
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
-    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+    std::array<flatbuffers::Offset<tflite::Buffer>, 5> buffers;
+    buffers[0] = CreateBuffer(flatBufferBuilder);
+    buffers[1] = CreateBuffer(flatBufferBuilder);
 
     auto biasTensorType = ::tflite::TensorType_FLOAT32;
     if (tensorType == ::tflite::TensorType_INT8)
@@ -47,14 +48,14 @@
     }
     if (constantWeights)
     {
-        buffers[1] = CreateBuffer(flatBufferBuilder,
+        buffers[2] = CreateBuffer(flatBufferBuilder,
                      flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(weightsData.data()),
                                                     sizeof(T) * weightsData.size()));
 
         if (tensorType == ::tflite::TensorType_INT8)
         {
             std::vector<int32_t> biasData = { 10 };
-            buffers[2] = CreateBuffer(flatBufferBuilder,
+            buffers[3] = CreateBuffer(flatBufferBuilder,
                                       flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
                                                                      sizeof(int32_t) * biasData.size()));
 
@@ -62,16 +63,17 @@
         else
         {
             std::vector<float> biasData = { 10 };
-            buffers[2] = CreateBuffer(flatBufferBuilder,
+            buffers[3] = CreateBuffer(flatBufferBuilder,
                                       flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(biasData.data()),
                                                                      sizeof(float) * biasData.size()));
         }
     }
     else
     {
-        buffers[1] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
-        buffers[2] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
+        buffers[2] = CreateBuffer(flatBufferBuilder);
+        buffers[3] = CreateBuffer(flatBufferBuilder);
     }
+    buffers[4] = CreateBuffer(flatBufferBuilder);
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -92,21 +94,21 @@
                               flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                       inputTensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input_0"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(weightsTensorShape.data(),
                                                                       weightsTensorShape.size()),
                               tensorType,
-                              1,
+                              2,
                               flatBufferBuilder.CreateString("weights"),
                               quantizationParameters);
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(biasTensorShape.data(),
                                                                       biasTensorShape.size()),
                               biasTensorType,
-                              2,
+                              3,
                               flatBufferBuilder.CreateString("bias"),
                               quantizationParameters);
 
@@ -114,7 +116,7 @@
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               tensorType,
-                              0,
+                              4,
                               flatBufferBuilder.CreateString("output"),
                               outputQuantizationParameters);
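
FullyConnected shows the layout rule that follows: the buffer table holds one slot per tensor plus
the reserved slot 0, and each tensor's fourth argument is its own slot. A hedged sketch of that
index arithmetic (the helper name and parameters are illustrative, not from the patch):

    #include <vector>
    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/lite/schema/schema_generated.h"

    // Builds tensorCount + 1 empty buffers: slot 0 stays reserved by tensorflow,
    // and buffer i + 1 is meant to back tensor i.
    std::vector<flatbuffers::Offset<tflite::Buffer>>
    MakeEmptyBuffers(flatbuffers::FlatBufferBuilder& fbb, size_t tensorCount)
    {
        std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
        buffers.reserve(tensorCount + 1);
        for (size_t i = 0; i < tensorCount + 1; ++i)
        {
            buffers.push_back(tflite::CreateBuffer(fbb)); // no data field: empty buffer
        }
        return buffers;
    }

    // Tensor i is then created with buffer index i + 1, e.g.
    // CreateTensor(fbb, shapeOffset, tensorType, static_cast<uint32_t>(i + 1), nameOffset);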
 
diff --git a/delegate/src/test/GatherNdTestHelper.hpp b/delegate/src/test/GatherNdTestHelper.hpp
index f475584..c2cf9ff 100644
--- a/delegate/src/test/GatherNdTestHelper.hpp
+++ b/delegate/src/test/GatherNdTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -32,7 +32,10 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
              CreateQuantizationParameters(flatBufferBuilder,
@@ -46,21 +49,21 @@
                               flatBufferBuilder.CreateVector<int32_t>(paramsShape.data(),
                                                                       paramsShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("params"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
                                                                       indicesShape.size()),
                               ::tflite::TensorType_INT32,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("indices"),
                               quantizationParameters);
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(expectedOutputShape.data(),
                                                                       expectedOutputShape.size()),
                               tensorType,
-                              0,
+                              3,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
diff --git a/delegate/src/test/GatherTestHelper.hpp b/delegate/src/test/GatherTestHelper.hpp
index fcacf04..4763e06 100644
--- a/delegate/src/test/GatherTestHelper.hpp
+++ b/delegate/src/test/GatherTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,7 +33,10 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
              CreateQuantizationParameters(flatBufferBuilder,
@@ -47,21 +50,21 @@
                               flatBufferBuilder.CreateVector<int32_t>(paramsShape.data(),
                                                                       paramsShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("params"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(indicesShape.data(),
                                                                       indicesShape.size()),
                               ::tflite::TensorType_INT32,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("indices"),
                               quantizationParameters);
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(expectedOutputShape.data(),
                                                                       expectedOutputShape.size()),
                               tensorType,
-                              0,
+                              3,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
diff --git a/delegate/src/test/LogicalTestHelper.hpp b/delegate/src/test/LogicalTestHelper.hpp
index d08a1af..2a1ff2b 100644
--- a/delegate/src/test/LogicalTestHelper.hpp
+++ b/delegate/src/test/LogicalTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,7 +33,10 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -48,21 +51,21 @@
                               flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
                                                                       input0TensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input_0"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
                                                                       input1TensorShape.size()),
                               tensorType,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("input_1"),
                               quantizationParameters);
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               tensorType,
-                              0,
+                              3,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
diff --git a/delegate/src/test/LstmTestHelper.hpp b/delegate/src/test/LstmTestHelper.hpp
index 36a6061..082d5de 100644
--- a/delegate/src/test/LstmTestHelper.hpp
+++ b/delegate/src/test/LstmTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -101,7 +101,7 @@
                                      flatBufferBuilder.CreateVector<float>({ outputQuantScale }),
                                      flatBufferBuilder.CreateVector<int64_t>({ outputQuantOffset }));
 
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
                                                                            inputShape.size()),
@@ -388,7 +388,7 @@
         operatorInputs.push_back(kTfLiteOptionalTensor);
     }
 
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(outputStateInDimensions.data(),
                                                                            outputStateInDimensions.size()),
@@ -399,7 +399,7 @@
                                    true));
     operatorInputs.push_back(buffers.size() - 1);
 
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(cellStateInDimensions.data(),
                                                                            cellStateInDimensions.size()),
@@ -493,7 +493,7 @@
         operatorInputs.push_back(kTfLiteOptionalTensor);
     }
     int outputBufferId = buffers.size();
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
                                                                            outputShape.size()),
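
The LSTM helper appends buffers and tensors in lockstep as optional inputs come and go, so indices
are derived from the current table sizes rather than hard-coded. A sketch of that pattern (names
are illustrative, not from the patch):

    #include <cstdint>
    #include <vector>
    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/lite/schema/schema_generated.h"

    // Appends one empty backing buffer plus its tensor; returns the tensor index
    // so the caller can record it in the operator's input list.
    int AppendStateTensor(flatbuffers::FlatBufferBuilder& fbb,
                          std::vector<flatbuffers::Offset<tflite::Buffer>>& buffers,
                          std::vector<flatbuffers::Offset<tflite::Tensor>>& tensors,
                          const std::vector<int32_t>& shape)
    {
        buffers.push_back(tflite::CreateBuffer(fbb));
        auto bufferIndex = static_cast<uint32_t>(buffers.size() - 1);
        tensors.push_back(tflite::CreateTensor(fbb,
                                               fbb.CreateVector<int32_t>(shape.data(), shape.size()),
                                               tflite::TensorType_FLOAT32,
                                               bufferIndex));
        return static_cast<int>(tensors.size() - 1);
    }
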
diff --git a/delegate/src/test/NormalizationTestHelper.hpp b/delegate/src/test/NormalizationTestHelper.hpp
index ebdfdc1..510b578 100644
--- a/delegate/src/test/NormalizationTestHelper.hpp
+++ b/delegate/src/test/NormalizationTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -46,7 +46,7 @@
                                     flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                             inputTensorShape.size()),
                                     tensorType,
-                                    0,
+                                    1,
                                     flatBufferBuilder.CreateString("input"),
                                     quantizationParameters);
 
@@ -54,15 +54,16 @@
                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                              outputTensorShape.size()),
                                      tensorType,
-                                     1,
+                                     2,
                                      flatBufferBuilder.CreateString("output"),
                                      quantizationParameters);
 
     std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, outputTensor };
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     std::vector<int32_t> operatorInputs = { 0 };
     std::vector<int> subgraphInputs = { 0 };
diff --git a/delegate/src/test/PackTestHelper.hpp b/delegate/src/test/PackTestHelper.hpp
index 0869228..a9e2ee1 100644
--- a/delegate/src/test/PackTestHelper.hpp
+++ b/delegate/src/test/PackTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -36,7 +36,8 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
             CreateQuantizationParameters(flatBufferBuilder,
@@ -57,7 +58,7 @@
                                   flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                           inputTensorShape.size()),
                                   tensorType,
-                                  0,
+                                  1,
                                   flatBufferBuilder.CreateString("input" + std::to_string(i)),
                                   quantizationParameters);
 
diff --git a/delegate/src/test/PadTestHelper.hpp b/delegate/src/test/PadTestHelper.hpp
index 5b9a1bc..e96bc4b 100644
--- a/delegate/src/test/PadTestHelper.hpp
+++ b/delegate/src/test/PadTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -70,12 +70,12 @@
     std::vector<flatbuffers::Offset<Tensor>> tensors = { inputTensor, paddingTensor, outputTensor};
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
                      flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(paddingDim.data()),
                                                     sizeof(int32_t) * paddingDim.size())));
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     std::vector<int32_t> operatorInputs;
     std::vector<int> subgraphInputs;
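
Pad keeps one non-empty buffer: constant data still travels through the buffer table as raw bytes,
and only the tensor pointing at the slot carries the element type and shape. A minimal sketch of
serialising such a constant (standalone illustration, not code from the patch):

    #include <cstdint>
    #include <vector>
    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/lite/schema/schema_generated.h"

    flatbuffers::Offset<tflite::Buffer>
    MakeConstantBuffer(flatbuffers::FlatBufferBuilder& fbb, const std::vector<int32_t>& values)
    {
        // The payload is reinterpreted as bytes; buffers only store raw data.
        return tflite::CreateBuffer(
            fbb, fbb.CreateVector(reinterpret_cast<const uint8_t*>(values.data()),
                                  sizeof(int32_t) * values.size()));
    }
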
diff --git a/delegate/src/test/Pooling2dTestHelper.hpp b/delegate/src/test/Pooling2dTestHelper.hpp
index b5d36b0..c7457db 100644
--- a/delegate/src/test/Pooling2dTestHelper.hpp
+++ b/delegate/src/test/Pooling2dTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -38,8 +38,9 @@
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
-    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    flatbuffers::Offset<tflite::Buffer> buffers[3] = {CreateBuffer(flatBufferBuilder),
+                                                      CreateBuffer(flatBufferBuilder),
+                                                      CreateBuffer(flatBufferBuilder)};
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -48,22 +49,21 @@
                                      flatBufferBuilder.CreateVector<float>({ quantScale }),
                                      flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
 
-    std::array<flatbuffers::Offset<Tensor>, 2> tensors;
-    tensors[0] = CreateTensor(flatBufferBuilder,
-                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
-                                                                      inputTensorShape.size()),
+    flatbuffers::Offset<Tensor> tensors[2] {
+         CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(inputTensorShape),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"),
-                              quantizationParameters);
+                              quantizationParameters),
 
-    tensors[1] = CreateTensor(flatBufferBuilder,
-                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
-                                                                      outputTensorShape.size()),
+         CreateTensor(flatBufferBuilder,
+                              flatBufferBuilder.CreateVector<int32_t>(outputTensorShape),
                               tensorType,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("output"),
-                              quantizationParameters);
+                              quantizationParameters)
+    };
 
     // create operator
     tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_Pool2DOptions;
@@ -80,18 +80,18 @@
     flatbuffers::Offset <Operator> poolingOperator =
         CreateOperator(flatBufferBuilder,
                        0,
-                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs),
+                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs),
                        operatorBuiltinOptionsType,
                        operatorBuiltinOptions);
 
-    const std::vector<int> subgraphInputs{0};
-    const std::vector<int> subgraphOutputs{1};
+    const int subgraphInputs[1] = {0};
+    const int subgraphOutputs[1] = {1};
     flatbuffers::Offset <SubGraph> subgraph =
         CreateSubGraph(flatBufferBuilder,
-                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                       flatBufferBuilder.CreateVector(tensors, 2),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs, 1),
+                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs, 1),
                        flatBufferBuilder.CreateVector(&poolingOperator, 1));
 
     flatbuffers::Offset <flatbuffers::String> modelDescription =
@@ -104,7 +104,7 @@
                     flatBufferBuilder.CreateVector(&operatorCode, 1),
                     flatBufferBuilder.CreateVector(&subgraph, 1),
                     modelDescription,
-                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+                    flatBufferBuilder.CreateVector(buffers, 3));
 
     flatBufferBuilder.Finish(flatbufferModel);
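
Pooling2d also leans on the wider flatbuffers overload set: CreateVector accepts a pointer plus
length, a std::vector directly, or a raw array with an explicit element count. A sketch of the
three equivalent spellings (assumes a live builder; not code from the patch):

    #include <cstdint>
    #include <vector>
    #include "flatbuffers/flatbuffers.h"

    void SerialiseShape(flatbuffers::FlatBufferBuilder& fbb)
    {
        std::vector<int32_t> shape = { 1, 2, 2, 1 };
        auto a = fbb.CreateVector<int32_t>(shape.data(), shape.size()); // pointer + length
        auto b = fbb.CreateVector<int32_t>(shape);                      // container overload
        int32_t raw[4] = { 1, 2, 2, 1 };
        auto c = fbb.CreateVector<int32_t>(raw, 4);                     // raw array + count
        (void)a; (void)b; (void)c; // all three serialise the same vector
    }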
 
diff --git a/delegate/src/test/Pooling3dTestHelper.hpp b/delegate/src/test/Pooling3dTestHelper.hpp
index f5f5cc3..47e00f7 100644
--- a/delegate/src/test/Pooling3dTestHelper.hpp
+++ b/delegate/src/test/Pooling3dTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -46,7 +46,9 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
diff --git a/delegate/src/test/PreluTestHelper.hpp b/delegate/src/test/PreluTestHelper.hpp
index b6c18cc..b50c377 100644
--- a/delegate/src/test/PreluTestHelper.hpp
+++ b/delegate/src/test/PreluTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,10 +33,11 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
-
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector(
         reinterpret_cast<const uint8_t *>(alphaData.data()), sizeof(float) * alphaData.size())));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -49,7 +51,7 @@
                                     flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
                                                                           inputShape.size()),
                                     tensorType,
-                                    0,
+                                    1,
                                     flatBufferBuilder.CreateString("input"),
                                     quantizationParameters);
 
@@ -57,7 +59,7 @@
                                     flatBufferBuilder.CreateVector<int32_t>(alphaShape.data(),
                                                                           alphaShape.size()),
                                     tensorType,
-                                    1,
+                                    2,
                                     flatBufferBuilder.CreateString("alpha"),
                                     quantizationParameters);
 
@@ -65,7 +67,7 @@
                                      flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
                                                                            outputShape.size()),
                                      tensorType,
-                                     0,
+                                     3,
                                      flatBufferBuilder.CreateString("output"),
                                      quantizationParameters);
 
diff --git a/delegate/src/test/QuantizationTestHelper.hpp b/delegate/src/test/QuantizationTestHelper.hpp
index e415504..a8b1022 100644
--- a/delegate/src/test/QuantizationTestHelper.hpp
+++ b/delegate/src/test/QuantizationTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -31,7 +31,9 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
             CreateQuantizationParameters(flatBufferBuilder,
@@ -46,14 +49,14 @@
                               flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                       inputTensorShape.size()),
                               inputTensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               outputTensorType,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
diff --git a/delegate/src/test/RedefineTestHelper.hpp b/delegate/src/test/RedefineTestHelper.hpp
index 6f06157..7f811d5 100644
--- a/delegate/src/test/RedefineTestHelper.hpp
+++ b/delegate/src/test/RedefineTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -22,44 +22,36 @@
 {
 
 std::vector<char> CreateRedefineTfLiteModel(
-    tflite::BuiltinOperator redefineOperatorCode,
-    tflite::TensorType tensorType,
-    const std::vector<int32_t>& inputTensorShape,
-    const std::vector<int32_t>& outputTensorShape,
-    const std::vector<int32_t>& targetShape,
-    bool useOption = true,
-    float quantScale = 1.0f,
-    int quantOffset  = 0)
+        tflite::BuiltinOperator redefineOperatorCode,
+        tflite::TensorType tensorType,
+        const std::vector<int32_t>& inputTensorShape,
+        const std::vector<int32_t>& outputTensorShape,
+        const std::vector<int32_t>& targetShape,
+        bool useOption = true,
+        float quantScale = 1.0f,
+        int quantOffset  = 0)
 {
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
-        CreateQuantizationParameters(flatBufferBuilder,
-                                     0,
-                                     0,
-                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
-                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
 
     auto inputTensor = CreateTensor(flatBufferBuilder,
                                     flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                             inputTensorShape.size()),
                                     tensorType,
-                                    0,
+                                    1,
                                     flatBufferBuilder.CreateString("input"),
                                     quantizationParameters);
 
-    auto outputTensor = CreateTensor(flatBufferBuilder,
-                                     flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
-                                                                             outputTensorShape.size()),
-                                     tensorType,
-                                     1,
-                                     flatBufferBuilder.CreateString("output"),
-                                     quantizationParameters);
-
     std::vector<flatbuffers::Offset<Tensor>> tensors;
     std::vector<int32_t> operatorInputs;
     std::vector<int> subgraphInputs;
@@ -67,25 +59,43 @@
 
     if (useOption)
     {
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        auto outputTensor = CreateTensor(flatBufferBuilder,
+                                         flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                                 outputTensorShape.size()),
+                                         tensorType,
+                                         2,
+                                         flatBufferBuilder.CreateString("output"),
+                                         quantizationParameters);
         tensors = { inputTensor, outputTensor};
         operatorInputs = {0};
         subgraphInputs = {0};
         operatorBuiltinOptions = CreateReshapeOptions(
-            flatBufferBuilder,
-            flatBufferBuilder.CreateVector(targetShape.data(), targetShape.size())).Union();
+                flatBufferBuilder,
+                flatBufferBuilder.CreateVector(targetShape.data(), targetShape.size())).Union();
     }
     else
     {
         buffers.push_back(
-            CreateBuffer(flatBufferBuilder,
-                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(targetShape.data()),
-                                                        sizeof(int32_t) * targetShape.size())));
+                CreateBuffer(flatBufferBuilder,
+                             flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(targetShape.data()),
+                                                            sizeof(int32_t) * targetShape.size())));
         int32_t size = static_cast<int32_t>(targetShape.size());
         auto shapeTensor = CreateTensor(flatBufferBuilder,
                                         flatBufferBuilder.CreateVector<int32_t>( { size } ),
                                         tflite::TensorType_INT32,
                                         2,
                                         flatBufferBuilder.CreateString("shape"));
+
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
+        auto outputTensor = CreateTensor(flatBufferBuilder,
+                                         flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
+                                                                                 outputTensorShape.size()),
+                                         tensorType,
+                                         3,
+                                         flatBufferBuilder.CreateString("output"),
+                                         quantizationParameters);
+
         tensors = { inputTensor, outputTensor, shapeTensor };
         operatorInputs = {0, 2};
         subgraphInputs = {0, 2};
@@ -97,33 +107,33 @@
 
     const std::vector<int32_t> operatorOutputs{1};
     flatbuffers::Offset <Operator> redefineOperator =
-        CreateOperator(flatBufferBuilder,
-                       0,
-                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
-                       operatorBuiltinOptionsType,
-                       operatorBuiltinOptions);
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
 
     const std::vector<int> subgraphOutputs{1};
     flatbuffers::Offset <SubGraph> subgraph =
-        CreateSubGraph(flatBufferBuilder,
-                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
-                       flatBufferBuilder.CreateVector(&redefineOperator, 1));
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&redefineOperator, 1));
 
     flatbuffers::Offset <flatbuffers::String> modelDescription =
-        flatBufferBuilder.CreateString("ArmnnDelegate: Reshape Operator Model");
+            flatBufferBuilder.CreateString("ArmnnDelegate: Reshape Operator Model");
     flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
                                                                          redefineOperatorCode);
 
     flatbuffers::Offset <Model> flatbufferModel =
-        CreateModel(flatBufferBuilder,
-                    TFLITE_SCHEMA_VERSION,
-                    flatBufferBuilder.CreateVector(&operatorCode, 1),
-                    flatBufferBuilder.CreateVector(&subgraph, 1),
-                    modelDescription,
-                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
     flatBufferBuilder.Finish(flatbufferModel);
 
@@ -172,8 +182,8 @@
     // Create the ArmNN Delegate
     armnnDelegate::DelegateOptions delegateOptions(backends);
     std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
     CHECK(theArmnnDelegate != nullptr);
     // Modify armnnDelegateInterpreter to use armnnDelegate
     CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
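
Because the Reshape model only contains a shape tensor in the useOption == false branch, the
output tensor's buffer index differs between the branches (2 versus 3), which is why the output
tensor is now built inside each branch. An alternative sketch that derives the index instead of
hard-coding it (an assumption-laden variation, not what the patch does):

    #include <cstdint>
    #include <vector>
    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/lite/schema/schema_generated.h"

    flatbuffers::Offset<tflite::Tensor>
    AppendOutputTensor(flatbuffers::FlatBufferBuilder& fbb,
                       std::vector<flatbuffers::Offset<tflite::Buffer>>& buffers,
                       const std::vector<int32_t>& outputShape,
                       tflite::TensorType tensorType)
    {
        // Push the backing buffer first, then read its slot back; the index stays
        // correct whether or not an optional tensor was appended earlier.
        buffers.push_back(tflite::CreateBuffer(fbb));
        auto bufferIndex = static_cast<uint32_t>(buffers.size() - 1);
        return tflite::CreateTensor(fbb,
                                    fbb.CreateVector<int32_t>(outputShape.data(), outputShape.size()),
                                    tensorType,
                                    bufferIndex,
                                    fbb.CreateString("output"));
    }
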
diff --git a/delegate/src/test/ReduceTestHelper.hpp b/delegate/src/test/ReduceTestHelper.hpp
index 5457adb..f500736 100644
--- a/delegate/src/test/ReduceTestHelper.hpp
+++ b/delegate/src/test/ReduceTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -37,14 +37,17 @@
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
-    std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
-    buffers[1] = CreateBuffer(flatBufferBuilder,
-                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
-                                                             sizeof(int32_t) * axisData.size()));
+    flatbuffers::Offset<tflite::Buffer> buffers[4] = {
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
+                                                        sizeof(int32_t) * axisData.size())),
+            CreateBuffer(flatBufferBuilder)
+    };
 
     flatbuffers::Offset<tflite::QuantizationParameters> quantizationParametersAxis
-    = CreateQuantizationParameters(flatBufferBuilder);
+            = CreateQuantizationParameters(flatBufferBuilder);
 
     flatbuffers::Offset<tflite::QuantizationParameters> quantizationParameters;
 
@@ -81,7 +84,7 @@
                               flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
                                                                       input0TensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"),
                               quantizationParameters);
 
@@ -89,7 +92,7 @@
                               flatBufferBuilder.CreateVector<int32_t>(input1TensorShape.data(),
                                                                       input1TensorShape.size()),
                               ::tflite::TensorType_INT32,
-                              1,
+                              2,
                               flatBufferBuilder.CreateString("axis"),
                               quantizationParametersAxis);
 
@@ -98,7 +101,7 @@
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               tensorType,
-                              0,
+                              3,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
@@ -135,7 +138,7 @@
                         flatBufferBuilder.CreateVector(&operatorCode, 1),
                         flatBufferBuilder.CreateVector(&subgraph, 1),
                         modelDescription,
-                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+                        flatBufferBuilder.CreateVector(buffers, 4));
 
     flatBufferBuilder.Finish(flatbufferModel);
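
The "model was not being parsed by tflite" failure mode only surfaces once InterpreterBuilder
runs. One way to catch it at build time (a sketch using the generated schema verifier; this check
is not part of the patch) is to verify the finished buffer before handing it over:

    #include "flatbuffers/flatbuffers.h"
    #include "tensorflow/lite/schema/schema_generated.h"

    // Assumes fbb already holds a finished model, i.e. a helper like the ones in
    // this patch has called fbb.Finish(...) on the Model offset.
    bool ModelBufferIsParseable(flatbuffers::FlatBufferBuilder& fbb)
    {
        flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
        return tflite::VerifyModelBuffer(verifier);
    }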
 
diff --git a/delegate/src/test/ResizeTestHelper.hpp b/delegate/src/test/ResizeTestHelper.hpp
index 030b2a7..6937a4b 100644
--- a/delegate/src/test/ResizeTestHelper.hpp
+++ b/delegate/src/test/ResizeTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -32,31 +32,33 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     buffers.push_back(CreateBuffer(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector(
                                            reinterpret_cast<const uint8_t*>(sizeTensorData.data()),
                                            sizeof(int32_t) * sizeTensorData.size())));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     std::array<flatbuffers::Offset<Tensor>, 3> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(), inputTensorShape.size()),
                               inputTensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input_tensor"));
 
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(sizeTensorShape.data(),
                                                                       sizeTensorShape.size()),
                               TensorType_INT32,
-                              1,
+                              2,
                               flatBufferBuilder.CreateString("size_input_tensor"));
 
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               inputTensorType,
-                              0,
+                              3,
                               flatBufferBuilder.CreateString("output_tensor"));
 
     // Create Operator
diff --git a/delegate/src/test/RoundTestHelper.hpp b/delegate/src/test/RoundTestHelper.hpp
index 3a35ee0..6638607 100644
--- a/delegate/src/test/RoundTestHelper.hpp
+++ b/delegate/src/test/RoundTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -30,7 +30,9 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -44,14 +46,14 @@
                               flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                       tensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                       tensorShape.size()),
                               tensorType,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
diff --git a/delegate/src/test/ShapeTestHelper.hpp b/delegate/src/test/ShapeTestHelper.hpp
index 854c508..9b3d574 100644
--- a/delegate/src/test/ShapeTestHelper.hpp
+++ b/delegate/src/test/ShapeTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -31,7 +31,9 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
              CreateQuantizationParameters(flatBufferBuilder,
@@ -45,14 +47,14 @@
                               flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                       inputTensorShape.size()),
                               inputTensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               outputTensorType,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
diff --git a/delegate/src/test/SliceTestHelper.hpp b/delegate/src/test/SliceTestHelper.hpp
index 4a2537f..94c076b 100644
--- a/delegate/src/test/SliceTestHelper.hpp
+++ b/delegate/src/test/SliceTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -35,39 +35,42 @@
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
-    std::array<flatbuffers::Offset<tflite::Buffer>, 3> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
-    buffers[1] = CreateBuffer(flatBufferBuilder,
-                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(beginTensorData.data()),
-                                                             sizeof(int32_t) * beginTensorData.size()));
-    buffers[2] = CreateBuffer(flatBufferBuilder,
-                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(sizeTensorData.data()),
-                                                             sizeof(int32_t) * sizeTensorData.size()));
+    flatbuffers::Offset<tflite::Buffer> buffers[5] = {
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(beginTensorData.data()),
+                                                        sizeof(int32_t) * beginTensorData.size())),
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(sizeTensorData.data()),
+                                                        sizeof(int32_t) * sizeTensorData.size())),
+            CreateBuffer(flatBufferBuilder)
+    };
 
     std::array<flatbuffers::Offset<Tensor>, 4> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                       inputTensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"));
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(beginTensorShape.data(),
                                                                       beginTensorShape.size()),
                               ::tflite::TensorType_INT32,
-                              1,
+                              2,
                               flatBufferBuilder.CreateString("begin_tensor"));
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(sizeTensorShape.data(),
                                                                       sizeTensorShape.size()),
                               ::tflite::TensorType_INT32,
-                              2,
+                              3,
                               flatBufferBuilder.CreateString("size_tensor"));
     tensors[3] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               tensorType,
-                              0,
+                              4,
                               flatBufferBuilder.CreateString("output"));
 
 
@@ -105,7 +108,7 @@
                     flatBufferBuilder.CreateVector(&operatorCode, 1),
                     flatBufferBuilder.CreateVector(&subgraph, 1),
                     modelDescription,
-                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+                    flatBufferBuilder.CreateVector(buffers, 5));
 
     flatBufferBuilder.Finish(flatbufferModel);
 
diff --git a/delegate/src/test/SoftmaxTestHelper.hpp b/delegate/src/test/SoftmaxTestHelper.hpp
index bd32c21..f3367f9 100644
--- a/delegate/src/test/SoftmaxTestHelper.hpp
+++ b/delegate/src/test/SoftmaxTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -28,19 +28,21 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     std::array<flatbuffers::Offset<Tensor>, 2> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                       tensorShape.size()),
                               tensorType,
-                              0);
+                              1);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(tensorShape.data(),
                                                                       tensorShape.size()),
                               tensorType,
-                              0);
+                              2);
 
     const std::vector<int32_t> operatorInputs({0});
     const std::vector<int32_t> operatorOutputs({1});
diff --git a/delegate/src/test/SpaceDepthTestHelper.hpp b/delegate/src/test/SpaceDepthTestHelper.hpp
index d9a783c..737e199 100644
--- a/delegate/src/test/SpaceDepthTestHelper.hpp
+++ b/delegate/src/test/SpaceDepthTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -37,21 +37,23 @@
                                      flatBufferBuilder.CreateVector<int64_t>({ 0 }));
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     std::array<flatbuffers::Offset<Tensor>, 2> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                       inputTensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               tensorType,
-                              0,
+                              2,
                               flatBufferBuilder.CreateString("output"),
                               quantizationParameters);
 
diff --git a/delegate/src/test/SplitTestHelper.hpp b/delegate/src/test/SplitTestHelper.hpp
index 31fc7d5..3c5f50f 100644
--- a/delegate/src/test/SplitTestHelper.hpp
+++ b/delegate/src/test/SplitTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -35,11 +35,12 @@
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
-    std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
-    buffers[1] = CreateBuffer(flatBufferBuilder,
-                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
-                                                             sizeof(int32_t) * axisData.size()));
+    std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder,
+                                   flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(axisData.data()),
+                                                                  sizeof(int32_t) * axisData.size())));
 
     auto quantizationParameters =
             CreateQuantizationParameters(flatBufferBuilder,
@@ -53,27 +54,28 @@
                               flatBufferBuilder.CreateVector<int32_t>(axisTensorShape.data(),
                                                                       axisTensorShape.size()),
                               ::tflite::TensorType_INT32,
-                              1,
+                              2,
                               flatBufferBuilder.CreateString("axis"),
                               quantizationParameters);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                       inputTensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"),
                               quantizationParameters);
 
     // Create output tensor
     for (unsigned int i = 0; i < outputTensorShapes.size(); ++i)
     {
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
         tensors[i + 2] = CreateTensor(flatBufferBuilder,
-                                  flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
-                                                                          outputTensorShapes[i].size()),
-                                  tensorType,
-                                  0,
-                                  flatBufferBuilder.CreateString("output"),
-                                  quantizationParameters);
+                                      flatBufferBuilder.CreateVector<int32_t>(outputTensorShapes[i].data(),
+                                                                              outputTensorShapes[i].size()),
+                                      tensorType,
+                                      (i + 3),
+                                      flatBufferBuilder.CreateString("output"),
+                                      quantizationParameters);
     }
 
     // create operator. Split uses SplitOptions.
@@ -109,7 +111,7 @@
                         flatBufferBuilder.CreateVector(&operatorCode, 1),
                         flatBufferBuilder.CreateVector(&subgraph, 1),
                         modelDescription,
-                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+                        flatBufferBuilder.CreateVector(buffers));
 
     flatBufferBuilder.Finish(flatbufferModel);
 
@@ -144,21 +146,21 @@
     // Create TfLite Interpreters
     std::unique_ptr<Interpreter> armnnDelegate;
     CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegate) == kTfLiteOk);
+                  (&armnnDelegate) == kTfLiteOk);
     CHECK(armnnDelegate != nullptr);
     CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
 
     std::unique_ptr<Interpreter> tfLiteDelegate;
     CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteDelegate) == kTfLiteOk);
+                  (&tfLiteDelegate) == kTfLiteOk);
     CHECK(tfLiteDelegate != nullptr);
     CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
 
     // Create the ArmNN Delegate
     armnnDelegate::DelegateOptions delegateOptions(backends);
     std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                     armnnDelegate::TfLiteArmnnDelegateDelete);
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
     CHECK(theArmnnDelegate != nullptr);
 
     // Modify armnnDelegateInterpreter to use armnnDelegate
@@ -210,11 +212,11 @@
                                                              sizeof(int32_t) * axisData.size()));
 
     auto quantizationParameters =
-        CreateQuantizationParameters(flatBufferBuilder,
-                                     0,
-                                     0,
-                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
-                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+            CreateQuantizationParameters(flatBufferBuilder,
+                                         0,
+                                         0,
+                                         flatBufferBuilder.CreateVector<float>({ quantScale }),
+                                         flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
 
     std::array<flatbuffers::Offset<Tensor>, 5> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
@@ -258,33 +260,33 @@
     const std::vector<int> operatorInputs{ {0, 1, 2} };
     const std::vector<int> operatorOutputs{ {3, 4} };
     flatbuffers::Offset <Operator> controlOperator =
-        CreateOperator(flatBufferBuilder,
-                       0,
-                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
-                       operatorBuiltinOptionsType,
-                       operatorBuiltinOptions);
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           operatorBuiltinOptionsType,
+                           operatorBuiltinOptions);
 
     const std::vector<int> subgraphInputs{ {0, 1, 2} };
     const std::vector<int> subgraphOutputs{ {3, 4} };
     flatbuffers::Offset <SubGraph> subgraph =
-        CreateSubGraph(flatBufferBuilder,
-                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
-                       flatBufferBuilder.CreateVector(&controlOperator, 1));
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&controlOperator, 1));
 
     flatbuffers::Offset <flatbuffers::String> modelDescription =
-        flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT_V Operator Model");
+            flatBufferBuilder.CreateString("ArmnnDelegate: SPLIT_V Operator Model");
     flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder, BuiltinOperator_SPLIT_V);
 
     flatbuffers::Offset <Model> flatbufferModel =
-        CreateModel(flatBufferBuilder,
-                    TFLITE_SCHEMA_VERSION,
-                    flatBufferBuilder.CreateVector(&operatorCode, 1),
-                    flatBufferBuilder.CreateVector(&subgraph, 1),
-                    modelDescription,
-                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
 
     flatBufferBuilder.Finish(flatbufferModel);
 
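SplitTestHelper above sizes its buffer table dynamically and hands the whole
std::vector to the builder, while the fixed-count helpers below fill a plain
array and pass an explicit length. Both map onto existing
flatbuffers::FlatBufferBuilder::CreateVector overloads; a short sketch
(variable names are illustrative):

    // std::vector overload, as used in SplitTestHelper:
    std::vector<flatbuffers::Offset<tflite::Buffer>> dynamicBuffers;
    dynamicBuffers.push_back(CreateBuffer(flatBufferBuilder));
    auto fromVector = flatBufferBuilder.CreateVector(dynamicBuffers);

    // pointer + element-count overload, as used in the array-based helpers:
    flatbuffers::Offset<tflite::Buffer> fixedBuffers[2] = {
        CreateBuffer(flatBufferBuilder),
        CreateBuffer(flatBufferBuilder)
    };
    auto fromArray = flatBufferBuilder.CreateVector(fixedBuffers, 2);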
diff --git a/delegate/src/test/StridedSliceTestHelper.hpp b/delegate/src/test/StridedSliceTestHelper.hpp
index 2bca4fd..ef944d7 100644
--- a/delegate/src/test/StridedSliceTestHelper.hpp
+++ b/delegate/src/test/StridedSliceTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -43,48 +43,51 @@
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
-    std::array<flatbuffers::Offset<tflite::Buffer>, 4> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
-    buffers[1] = CreateBuffer(flatBufferBuilder,
-                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(beginTensorData.data()),
-                                                             sizeof(int32_t) * beginTensorData.size()));
-    buffers[2] = CreateBuffer(flatBufferBuilder,
-                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(endTensorData.data()),
-                                                             sizeof(int32_t) * endTensorData.size()));
-    buffers[3] = CreateBuffer(flatBufferBuilder,
-                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(strideTensorData.data()),
-                                                             sizeof(int32_t) * strideTensorData.size()));
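+    // Six buffers: 0 is the reserved empty sentinel, 1 backs the input, 2-4 hold
+    // the begin/end/stride data, and 5 backs the output.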
+    flatbuffers::Offset<tflite::Buffer> buffers[6] = {
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(beginTensorData.data()),
+                                                        sizeof(int32_t) * beginTensorData.size())),
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(endTensorData.data()),
+                                                        sizeof(int32_t) * endTensorData.size())),
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(strideTensorData.data()),
+                                                        sizeof(int32_t) * strideTensorData.size())),
+            CreateBuffer(flatBufferBuilder)
+    };
 
     std::array<flatbuffers::Offset<Tensor>, 5> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                       inputTensorShape.size()),
                               tensorType,
-                              0,
+                              1,
                               flatBufferBuilder.CreateString("input"));
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(beginTensorShape.data(),
                                                                       beginTensorShape.size()),
                               ::tflite::TensorType_INT32,
-                              1,
+                              2,
                               flatBufferBuilder.CreateString("begin_tensor"));
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(endTensorShape.data(),
                                                                       endTensorShape.size()),
                               ::tflite::TensorType_INT32,
-                              2,
+                              3,
                               flatBufferBuilder.CreateString("end_tensor"));
     tensors[3] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(strideTensorShape.data(),
                                                                       strideTensorShape.size()),
                               ::tflite::TensorType_INT32,
-                              3,
+                              4,
                               flatBufferBuilder.CreateString("stride_tensor"));
     tensors[4] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
                               tensorType,
-                              0,
+                              5,
                               flatBufferBuilder.CreateString("output"));
 
 
@@ -127,7 +130,7 @@
                         flatBufferBuilder.CreateVector(&operatorCode, 1),
                         flatBufferBuilder.CreateVector(&subgraph, 1),
                         modelDescription,
-                        flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+                        flatBufferBuilder.CreateVector(buffers, 6));
 
     flatBufferBuilder.Finish(flatbufferModel);
 
@@ -177,21 +180,21 @@
     // Create TfLite Interpreters
     std::unique_ptr<Interpreter> armnnDelegate;
     CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegate) == kTfLiteOk);
+                  (&armnnDelegate) == kTfLiteOk);
     CHECK(armnnDelegate != nullptr);
     CHECK(armnnDelegate->AllocateTensors() == kTfLiteOk);
 
     std::unique_ptr<Interpreter> tfLiteDelegate;
     CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteDelegate) == kTfLiteOk);
+                  (&tfLiteDelegate) == kTfLiteOk);
     CHECK(tfLiteDelegate != nullptr);
     CHECK(tfLiteDelegate->AllocateTensors() == kTfLiteOk);
 
     // Create the ArmNN Delegate
     armnnDelegate::DelegateOptions delegateOptions(backends);
     std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-                                   theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                                                    armnnDelegate::TfLiteArmnnDelegateDelete);
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
     CHECK(theArmnnDelegate != nullptr);
 
     // Modify armnnDelegateInterpreter to use armnnDelegate
diff --git a/delegate/src/test/TransposeTestHelper.hpp b/delegate/src/test/TransposeTestHelper.hpp
index 1d55273..4479c48 100644
--- a/delegate/src/test/TransposeTestHelper.hpp
+++ b/delegate/src/test/TransposeTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -26,53 +26,56 @@
 {
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
-    std::array<flatbuffers::Offset<tflite::Buffer>, 2> buffers;
-    buffers[0] = CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({}));
-    buffers[1] = CreateBuffer(flatBufferBuilder,
-                              flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputPermVec.data()),
-                                                             sizeof(int32_t) * inputPermVec.size()));
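+    // Four buffers: 0 is the reserved empty sentinel, 1 backs the input, 2 holds
+    // the permutation vector, and 3 backs the output.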
+    flatbuffers::Offset<tflite::Buffer> buffers[4]{
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder),
+            CreateBuffer(flatBufferBuilder,
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(inputPermVec.data()),
+                                                        sizeof(int32_t) * inputPermVec.size())),
+            CreateBuffer(flatBufferBuilder)
+    };
     std::array<flatbuffers::Offset<Tensor>, 3> tensors;
     tensors[0] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(input0TensorShape.data(),
                                                                       input0TensorShape.size()),
-                              tensorType, 0);
+                              tensorType, 1);
     tensors[1] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(inputPermVecShape.data(),
                                                                       inputPermVecShape.size()),
-                              tflite::TensorType_INT32, 1,
+                              tflite::TensorType_INT32, 2,
                               flatBufferBuilder.CreateString("permutation_vector"));
     tensors[2] = CreateTensor(flatBufferBuilder,
                               flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                       outputTensorShape.size()),
-                              tensorType);
+                              tensorType, 3);
     const std::vector<int32_t> operatorInputs{0, 1};
     const std::vector<int32_t> operatorOutputs{2};
     flatbuffers::Offset <Operator> transposeOperator =
-        CreateOperator(flatBufferBuilder,
-                       0,
-                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
-                       BuiltinOptions_TransposeOptions,
-                       CreateTransposeOptions(flatBufferBuilder).Union());
+            CreateOperator(flatBufferBuilder,
+                           0,
+                           flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
+                           BuiltinOptions_TransposeOptions,
+                           CreateTransposeOptions(flatBufferBuilder).Union());
     const std::vector<int> subgraphInputs{0, 1};
     const std::vector<int> subgraphOutputs{2};
     flatbuffers::Offset <SubGraph> subgraph =
-        CreateSubGraph(flatBufferBuilder,
-                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
-                       flatBufferBuilder.CreateVector(&transposeOperator, 1));
+            CreateSubGraph(flatBufferBuilder,
+                           flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphInputs.data(), subgraphInputs.size()),
+                           flatBufferBuilder.CreateVector<int32_t>(subgraphOutputs.data(), subgraphOutputs.size()),
+                           flatBufferBuilder.CreateVector(&transposeOperator, 1));
     flatbuffers::Offset <flatbuffers::String> modelDescription =
-        flatBufferBuilder.CreateString("ArmnnDelegate: Transpose Operator Model");
+            flatBufferBuilder.CreateString("ArmnnDelegate: Transpose Operator Model");
     flatbuffers::Offset <OperatorCode> operatorCode = CreateOperatorCode(flatBufferBuilder,
                                                                          tflite::BuiltinOperator_TRANSPOSE);
     flatbuffers::Offset <Model> flatbufferModel =
-        CreateModel(flatBufferBuilder,
-                    TFLITE_SCHEMA_VERSION,
-                    flatBufferBuilder.CreateVector(&operatorCode, 1),
-                    flatBufferBuilder.CreateVector(&subgraph, 1),
-                    modelDescription,
-                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+            CreateModel(flatBufferBuilder,
+                        TFLITE_SCHEMA_VERSION,
+                        flatBufferBuilder.CreateVector(&operatorCode, 1),
+                        flatBufferBuilder.CreateVector(&subgraph, 1),
+                        modelDescription,
+                        flatBufferBuilder.CreateVector(buffers, 4));
     flatBufferBuilder.Finish(flatbufferModel);
     return std::vector<char>(flatBufferBuilder.GetBufferPointer(),
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
@@ -104,21 +107,21 @@
     // Create TfLite Interpreters
     std::unique_ptr<Interpreter> armnnDelegateInterpreter;
     CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&armnnDelegateInterpreter) == kTfLiteOk);
+                  (&armnnDelegateInterpreter) == kTfLiteOk);
     CHECK(armnnDelegateInterpreter != nullptr);
     CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
 
     std::unique_ptr<Interpreter> tfLiteInterpreter;
     CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-              (&tfLiteInterpreter) == kTfLiteOk);
+                  (&tfLiteInterpreter) == kTfLiteOk);
     CHECK(tfLiteInterpreter != nullptr);
     CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
 
     // Create the ArmNN Delegate
     armnnDelegate::DelegateOptions delegateOptions(backends);
     std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                         armnnDelegate::TfLiteArmnnDelegateDelete);
+            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                             armnnDelegate::TfLiteArmnnDelegateDelete);
     CHECK(theArmnnDelegate != nullptr);
     // Modify armnnDelegateInterpreter to use armnnDelegate
     CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
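The LSTM helper below interleaves optional tensors with kTfLiteOptionalTensor
placeholders, and with buffer 0 now reserved the buffer list and the tensor
list no longer line up one-to-one. The hunks therefore keep buffers.size() - 1
for each tensor's buffer field but switch the operator wiring to
tensors.size() - 1. A condensed sketch of the bookkeeping, assuming the same
flatBufferBuilder/buffers/tensors/operatorInputs variables as the helper
(addConstInput is a hypothetical name used only for illustration):

    auto addConstInput = [&](const uint8_t* data, size_t bytes,
                             flatbuffers::Offset<flatbuffers::Vector<int32_t>> shape,
                             tflite::TensorType type)
    {
        buffers.push_back(CreateBuffer(flatBufferBuilder,
                                       flatBufferBuilder.CreateVector(data, bytes)));
        tensors.push_back(CreateTensor(flatBufferBuilder, shape, type,
                                       buffers.size() - 1));   // tensor -> its own buffer
        operatorInputs.push_back(tensors.size() - 1);          // operator -> tensor index
    };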
diff --git a/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp b/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp
index 9d6ef87..10555ac 100644
--- a/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp
+++ b/delegate/src/test/UnidirectionalSequenceLstmTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -19,7 +19,6 @@
 
 #include <doctest/doctest.h>
 
-
 #include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/NumericCast.hpp>
 #include <armnn/TypesUtils.hpp>
@@ -33,7 +32,7 @@
 namespace
 {
 
-template <typename T>
+template<typename T>
 std::vector<char> CreateUnidirectionalSequenceLstmTfLiteModel(tflite::TensorType tensorType,
                                                               int32_t batchSize,
                                                               int32_t timeSize,
@@ -78,7 +77,7 @@
                                                               float clippingThresProj,
                                                               bool isTimeMajor,
                                                               float quantScale,
-                                                              int quantOffset  = 0)
+                                                              int quantOffset = 0)
 {
 
     std::vector<int32_t> tensorInfo0{};
@@ -105,39 +104,41 @@
 
     std::vector<int> operatorInputs;
     using namespace tflite;
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
     std::vector<flatbuffers::Offset<Tensor>> tensors;
 
     auto quantizationParameters =
-        CreateQuantizationParameters(flatBufferBuilder,
-                                     0,
-                                     0,
-                                     flatBufferBuilder.CreateVector<float>({ 1.0f }),
-                                     flatBufferBuilder.CreateVector<int64_t>({ 0 }));
+             CreateQuantizationParameters(flatBufferBuilder,
+                                          0,
+                                          0,
+                                          flatBufferBuilder.CreateVector<float>({1.0f}),
+                                          flatBufferBuilder.CreateVector<int64_t>({0}));
 
     auto weightQuantizationParameters =
-        CreateQuantizationParameters(flatBufferBuilder,
-                                     0,
-                                     0,
-                                     flatBufferBuilder.CreateVector<float>({ quantScale }),
-                                     flatBufferBuilder.CreateVector<int64_t>({ quantOffset }));
+             CreateQuantizationParameters(flatBufferBuilder,
+                                          0,
+                                          0,
+                                          flatBufferBuilder.CreateVector<float>({quantScale}),
+                                          flatBufferBuilder.CreateVector<int64_t>({quantOffset}));
 
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(inputShape.data(),
                                                                            inputShape.size()),
                                    ::tflite::TensorType_FLOAT32,
                                    buffers.size() - 1,
                                    flatBufferBuilder.CreateString("input_0")));
-    operatorInputs.push_back(buffers.size() - 1);
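+    // Operator inputs index the tensor list, not the buffer list; the extra
+    // sentinel buffer at index 0 means the two no longer move in lockstep.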
+    operatorInputs.push_back(tensors.size() - 1);
 
     if (hasInputToInputWeights)
     {
         buffers.push_back(
             CreateBuffer(flatBufferBuilder,
-                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToInputWeights.data()),
-                                                        sizeof(T) * inputToInputWeights.size())));
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(inputToInputWeights.data()),
+                             sizeof(T) * inputToInputWeights.size())));
         tensors.push_back(CreateTensor(flatBufferBuilder,
                                        flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
                                                                                tensorInfoInputSize.size()),
@@ -145,7 +146,7 @@
                                        buffers.size() - 1,
                                        flatBufferBuilder.CreateString("inputToInputWeights"),
                                        weightQuantizationParameters));
-        operatorInputs.push_back(buffers.size() - 1);
+        operatorInputs.push_back(tensors.size() - 1);
     }
     else
     {
@@ -154,8 +155,9 @@
 
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
-                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToForgetWeights.data()),
-                                                    sizeof(T) * inputToForgetWeights.size())));
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(inputToForgetWeights.data()),
+                         sizeof(T) * inputToForgetWeights.size())));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
                                                                            tensorInfoInputSize.size()),
@@ -163,12 +165,13 @@
                                    buffers.size() - 1,
                                    flatBufferBuilder.CreateString("inputToForgetWeights"),
                                    weightQuantizationParameters));
-    operatorInputs.push_back(buffers.size() - 1);
+    operatorInputs.push_back(tensors.size() - 1);
 
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
-                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToCellWeights.data()),
-                                                    sizeof(T) * inputToCellWeights.size())));
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(inputToCellWeights.data()),
+                         sizeof(T) * inputToCellWeights.size())));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
                                                                            tensorInfoInputSize.size()),
@@ -176,12 +179,13 @@
                                    buffers.size() - 1,
                                    flatBufferBuilder.CreateString("inputToCellWeights"),
                                    weightQuantizationParameters));
-    operatorInputs.push_back(buffers.size() - 1);
+    operatorInputs.push_back(tensors.size() - 1);
 
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
-                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(inputToOutputWeights.data()),
-                                                    sizeof(T) * inputToOutputWeights.size())));
+                     flatBufferBuilder.CreateVector(
+                         reinterpret_cast<const uint8_t*>(inputToOutputWeights.data()),
+                         sizeof(T) * inputToOutputWeights.size())));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(tensorInfoInputSize.data(),
                                                                            tensorInfoInputSize.size()),
@@ -189,7 +193,7 @@
                                    buffers.size() - 1,
                                    flatBufferBuilder.CreateString("inputToOutputWeights"),
                                    weightQuantizationParameters));
-    operatorInputs.push_back(buffers.size() - 1);
+    operatorInputs.push_back(tensors.size() - 1);
 
     if (hasRecurrentToInputWeights)
     {
@@ -204,7 +208,7 @@
                                        buffers.size() - 1,
                                        flatBufferBuilder.CreateString("recurrentToInputWeights"),
                                        weightQuantizationParameters));
-        operatorInputs.push_back(buffers.size() - 1);
+        operatorInputs.push_back(tensors.size() - 1);
     }
     else
     {
@@ -213,7 +217,8 @@
 
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
-                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToForgetWeights.data()),
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                        recurrentToForgetWeights.data()),
                                                     sizeof(T) * recurrentToForgetWeights.size())));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
@@ -222,11 +227,12 @@
                                    buffers.size() - 1,
                                    flatBufferBuilder.CreateString("recurrentToForgetWeights"),
                                    weightQuantizationParameters));
-    operatorInputs.push_back(buffers.size() - 1);
+    operatorInputs.push_back(tensors.size() - 1);
 
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
-                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToCellWeights.data()),
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                        recurrentToCellWeights.data()),
                                                     sizeof(T) * recurrentToCellWeights.size())));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
@@ -235,26 +241,28 @@
                                    buffers.size() - 1,
                                    flatBufferBuilder.CreateString("recurrentToCellWeights"),
                                    weightQuantizationParameters));
-    operatorInputs.push_back(buffers.size() - 1);
+    operatorInputs.push_back(tensors.size() - 1);
 
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
-                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(recurrentToOutputWeights.data()),
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                        recurrentToOutputWeights.data()),
                                                     sizeof(T) * recurrentToOutputWeights.size())));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(tensorInfoOutputSize.data(),
                                                                            tensorInfoOutputSize.size()),
                                    tensorType,
-                                   buffers.size() - 1 ,
+                                   buffers.size() - 1,
                                    flatBufferBuilder.CreateString("recurrentToOutputWeights"),
                                    weightQuantizationParameters));
-    operatorInputs.push_back(buffers.size() - 1);
+    operatorInputs.push_back(tensors.size() - 1);
 
     if (hasCellToInputWeights)
     {
         buffers.push_back(
             CreateBuffer(flatBufferBuilder,
-                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToInputWeights.data()),
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                            cellToInputWeights.data()),
                                                         sizeof(T) * cellToInputWeights.size())));
         tensors.push_back(CreateTensor(flatBufferBuilder,
                                        flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -263,7 +271,7 @@
                                        buffers.size() - 1,
                                        flatBufferBuilder.CreateString("cellToInputWeights"),
                                        weightQuantizationParameters));
-        operatorInputs.push_back(buffers.size() - 1);
+        operatorInputs.push_back(tensors.size() - 1);
     }
     else
     {
@@ -274,7 +282,8 @@
     {
         buffers.push_back(
             CreateBuffer(flatBufferBuilder,
-                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToForgetWeights.data()),
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                            cellToForgetWeights.data()),
                                                         sizeof(T) * cellToForgetWeights.size())));
         tensors.push_back(CreateTensor(flatBufferBuilder,
                                        flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -283,7 +292,7 @@
                                        buffers.size() - 1,
                                        flatBufferBuilder.CreateString("cellToForgetWeights"),
                                        weightQuantizationParameters));
-        operatorInputs.push_back(buffers.size() - 1);
+        operatorInputs.push_back(tensors.size() - 1);
     }
     else
     {
@@ -294,7 +303,8 @@
     {
         buffers.push_back(
             CreateBuffer(flatBufferBuilder,
-                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellToOutputWeights.data()),
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                            cellToOutputWeights.data()),
                                                         sizeof(T) * cellToOutputWeights.size())));
         tensors.push_back(CreateTensor(flatBufferBuilder,
                                        flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -303,7 +313,7 @@
                                        buffers.size() - 1,
                                        flatBufferBuilder.CreateString("cellToOutputWeights"),
                                        weightQuantizationParameters));
-        operatorInputs.push_back(buffers.size() - 1);
+        operatorInputs.push_back(tensors.size() - 1);
     }
     else
     {
@@ -322,7 +332,7 @@
                                        ::tflite::TensorType_FLOAT32,
                                        buffers.size() - 1,
                                        flatBufferBuilder.CreateString("inputGateBias")));
-        operatorInputs.push_back(buffers.size() - 1);
+        operatorInputs.push_back(tensors.size() - 1);
     }
     else
     {
@@ -331,7 +341,7 @@
 
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
-                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(forgetGateBias.data()),
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(forgetGateBias.data()),
                                                     sizeof(float) * forgetGateBias.size())));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -339,11 +349,11 @@
                                    ::tflite::TensorType_FLOAT32,
                                    buffers.size() - 1,
                                    flatBufferBuilder.CreateString("forgetGateBias")));
-    operatorInputs.push_back(buffers.size() - 1);
+    operatorInputs.push_back(tensors.size() - 1);
 
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
-                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(cellBias.data()),
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(cellBias.data()),
                                                     sizeof(float) * cellBias.size())));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -351,11 +361,11 @@
                                    ::tflite::TensorType_FLOAT32,
                                    buffers.size() - 1,
                                    flatBufferBuilder.CreateString("cellBias")));
-    operatorInputs.push_back(buffers.size() - 1);
+    operatorInputs.push_back(tensors.size() - 1);
 
     buffers.push_back(
         CreateBuffer(flatBufferBuilder,
-                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(outputGateBias.data()),
+                     flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(outputGateBias.data()),
                                                     sizeof(float) * outputGateBias.size())));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -363,14 +373,15 @@
                                    ::tflite::TensorType_FLOAT32,
                                    buffers.size() - 1,
                                    flatBufferBuilder.CreateString("outputGateBias")));
-    operatorInputs.push_back(buffers.size() - 1);
+    operatorInputs.push_back(tensors.size() - 1);
 
     if (hasProjectionWeights)
     {
         buffers.push_back(
             CreateBuffer(flatBufferBuilder,
-                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(projectionWeights.data()),
-                                                        sizeof(T) * projectionWeights.size())));
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(projectionWeights.data()),
+                             sizeof(T) * projectionWeights.size())));
         tensors.push_back(CreateTensor(flatBufferBuilder,
                                        flatBufferBuilder.CreateVector<int32_t>(projectionWeightDimensions.data(),
                                                                                projectionWeightDimensions.size()),
@@ -378,7 +389,7 @@
                                        buffers.size() - 1,
                                        flatBufferBuilder.CreateString("projectionWeights"),
                                        weightQuantizationParameters));
-        operatorInputs.push_back(buffers.size() - 1);
+        operatorInputs.push_back(tensors.size() - 1);
     }
     else
     {
@@ -389,22 +400,23 @@
     {
         buffers.push_back(
             CreateBuffer(flatBufferBuilder,
-                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(projectionBias.data()),
-                                                        sizeof(float) * projectionBias.size())));
+                         flatBufferBuilder.CreateVector(
+                             reinterpret_cast<const uint8_t*>(projectionBias.data()),
+                             sizeof(float) * projectionBias.size())));
         tensors.push_back(CreateTensor(flatBufferBuilder,
                                        flatBufferBuilder.CreateVector<int32_t>(projectionBiasDimensions.data(),
                                                                                projectionBiasDimensions.size()),
                                        ::tflite::TensorType_FLOAT32,
                                        buffers.size() - 1,
                                        flatBufferBuilder.CreateString("projectionBias")));
-        operatorInputs.push_back(buffers.size() - 1);
+        operatorInputs.push_back(tensors.size() - 1);
     }
     else
     {
         operatorInputs.push_back(kTfLiteOptionalTensor);
     }
 
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(outputStateInDimensions.data(),
                                                                            outputStateInDimensions.size()),
@@ -413,9 +425,9 @@
                                    flatBufferBuilder.CreateString("outputStateInInfo"),
                                    quantizationParameters,
                                    true));
-    operatorInputs.push_back(buffers.size() - 1);
+    operatorInputs.push_back(tensors.size() - 1);
 
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(cellStateInDimensions.data(),
                                                                            cellStateInDimensions.size()),
@@ -424,22 +436,22 @@
                                    flatBufferBuilder.CreateString("cellStateInInfo"),
                                    quantizationParameters,
                                    true));
-    operatorInputs.push_back(buffers.size() - 1);
+    operatorInputs.push_back(tensors.size() - 1);
 
     if (hasInputLayerNormWeights)
     {
         buffers.push_back(
             CreateBuffer(flatBufferBuilder,
                          flatBufferBuilder.CreateVector(
-                                              reinterpret_cast<const uint8_t *>(inputLayerNormWeights.data()),
-                                              sizeof(float) * inputLayerNormWeights.size())));
+                             reinterpret_cast<const uint8_t*>(inputLayerNormWeights.data()),
+                             sizeof(float) * inputLayerNormWeights.size())));
         tensors.push_back(CreateTensor(flatBufferBuilder,
                                        flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
                                                                                tensorInfoNumUnits.size()),
                                        ::tflite::TensorType_FLOAT32,
                                        buffers.size() - 1,
                                        flatBufferBuilder.CreateString("inputLayerNormWeights")));
-        operatorInputs.push_back(buffers.size() - 1);
+        operatorInputs.push_back(tensors.size() - 1);
     }
     else
     {
@@ -451,15 +463,15 @@
         buffers.push_back(
             CreateBuffer(flatBufferBuilder,
                          flatBufferBuilder.CreateVector(
-                                              reinterpret_cast<const uint8_t *>(forgetLayerNormWeights.data()),
-                                              sizeof(float) * forgetLayerNormWeights.size())));
+                             reinterpret_cast<const uint8_t*>(forgetLayerNormWeights.data()),
+                             sizeof(float) * forgetLayerNormWeights.size())));
         tensors.push_back(CreateTensor(flatBufferBuilder,
                                        flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
                                                                                tensorInfoNumUnits.size()),
                                        ::tflite::TensorType_FLOAT32,
                                        buffers.size() - 1,
                                        flatBufferBuilder.CreateString("forgetLayerNormWeights")));
-        operatorInputs.push_back(buffers.size() - 1);
+        operatorInputs.push_back(tensors.size() - 1);
     }
     else
     {
@@ -470,7 +482,8 @@
     {
         buffers.push_back(
             CreateBuffer(flatBufferBuilder,
-                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t *>(cellLayerNormWeights.data()),
+                         flatBufferBuilder.CreateVector(reinterpret_cast<const uint8_t*>(
+                                                            cellLayerNormWeights.data()),
                                                         sizeof(float) * cellLayerNormWeights.size())));
         tensors.push_back(CreateTensor(flatBufferBuilder,
                                        flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -478,7 +491,7 @@
                                        ::tflite::TensorType_FLOAT32,
                                        buffers.size() - 1,
                                        flatBufferBuilder.CreateString("cellLayerNormWeights")));
-        operatorInputs.push_back(buffers.size() - 1);
+        operatorInputs.push_back(tensors.size() - 1);
     }
     else
     {
@@ -490,7 +503,7 @@
         buffers.push_back(
             CreateBuffer(flatBufferBuilder,
                          flatBufferBuilder.CreateVector(
-                             reinterpret_cast<const uint8_t *>(outputLayerNormWeights.data()),
+                             reinterpret_cast<const uint8_t*>(outputLayerNormWeights.data()),
                              sizeof(float) * outputLayerNormWeights.size())));
         tensors.push_back(CreateTensor(flatBufferBuilder,
                                        flatBufferBuilder.CreateVector<int32_t>(tensorInfoNumUnits.data(),
@@ -498,58 +511,63 @@
                                        ::tflite::TensorType_FLOAT32,
                                        buffers.size() - 1,
                                        flatBufferBuilder.CreateString("outputLayerNormWeights")));
-        operatorInputs.push_back(buffers.size() - 1);
+        operatorInputs.push_back(tensors.size() - 1);
     }
     else
     {
         operatorInputs.push_back(kTfLiteOptionalTensor);
     }
-    int outputBufferId = buffers.size();
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
     tensors.push_back(CreateTensor(flatBufferBuilder,
                                    flatBufferBuilder.CreateVector<int32_t>(outputShape.data(),
                                                                            outputShape.size()),
                                    ::tflite::TensorType_FLOAT32,
-                                   outputBufferId,
+                                   buffers.size() - 1,
                                    flatBufferBuilder.CreateString("output")));
     std::vector<int> operatorOutputs;
-    operatorOutputs.push_back(buffers.size() - 1);
+    operatorOutputs.push_back(tensors.size() - 1);
 
     // create operator
-    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
-    flatbuffers::Offset<void> operatorBuiltinOptions =
-        CreateUnidirectionalSequenceLSTMOptions(flatBufferBuilder,
-                          activationFunction,
-                          clippingThresCell,
-                          clippingThresProj,
-                          isTimeMajor).Union();
+    tflite::BuiltinOptions operatorBuiltinOptionsType = BuiltinOptions_UnidirectionalSequenceLSTMOptions;
+    flatbuffers::Offset<void> operatorBuiltinOptions =
+                                  CreateUnidirectionalSequenceLSTMOptions(flatBufferBuilder,
+                                                                          activationFunction,
+                                                                          clippingThresCell,
+                                                                          clippingThresProj,
+                                                                          isTimeMajor).Union();
 
     flatbuffers::Offset<Operator> lstmOperator =
-        CreateOperator(flatBufferBuilder,
-                       0,
-                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
-                       operatorBuiltinOptionsType, operatorBuiltinOptions);
+                                      CreateOperator(flatBufferBuilder,
+                                                     0,
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
+                                                                                             operatorInputs.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+                                                                                             operatorOutputs.size()),
+                                                     operatorBuiltinOptionsType, operatorBuiltinOptions);
 
-    flatbuffers::Offset <SubGraph> subgraph =
-        CreateSubGraph(flatBufferBuilder,
-                       flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(), operatorInputs.size()),
-                       flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(), operatorOutputs.size()),
-                       flatBufferBuilder.CreateVector(&lstmOperator, 1));
+    flatbuffers::Offset<SubGraph> subgraph =
+                                      CreateSubGraph(flatBufferBuilder,
+                                                     flatBufferBuilder.CreateVector(tensors.data(), tensors.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorInputs.data(),
+                                                                                             operatorInputs.size()),
+                                                     flatBufferBuilder.CreateVector<int32_t>(operatorOutputs.data(),
+                                                                                             operatorOutputs.size()),
+                                                     flatBufferBuilder.CreateVector(&lstmOperator, 1));
 
-    flatbuffers::Offset <flatbuffers::String> modelDescription =
-        flatBufferBuilder.CreateString("ArmnnDelegate: UnidirectionalSequenceLSTM Operator Model");
-    flatbuffers::Offset <OperatorCode> operatorCode =
-        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM);
+    flatbuffers::Offset<flatbuffers::String> modelDescription =
+                                                 flatBufferBuilder.CreateString(
+                                                     "ArmnnDelegate: UnidirectionalSequenceLSTM Operator Model");
+    flatbuffers::Offset<OperatorCode> operatorCode =
+        CreateOperatorCode(flatBufferBuilder, tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM);
 
-    flatbuffers::Offset <Model> flatbufferModel =
-        CreateModel(flatBufferBuilder,
-                    TFLITE_SCHEMA_VERSION,
-                    flatBufferBuilder.CreateVector(&operatorCode, 1),
-                    flatBufferBuilder.CreateVector(&subgraph, 1),
-                    modelDescription,
-                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+    flatbuffers::Offset<Model> flatbufferModel =
+                                   CreateModel(flatBufferBuilder,
+                                               TFLITE_SCHEMA_VERSION,
+                                               flatBufferBuilder.CreateVector(&operatorCode, 1),
+                                               flatBufferBuilder.CreateVector(&subgraph, 1),
+                                               modelDescription,
+                                               flatBufferBuilder.CreateVector(buffers));
 
     flatBufferBuilder.Finish(flatbufferModel);
 
@@ -557,7 +575,7 @@
                              flatBufferBuilder.GetBufferPointer() + flatBufferBuilder.GetSize());
 }
 
-template <typename T>
+template<typename T>
 void UnidirectionalSequenceLstmTestImpl(std::vector<armnn::BackendId>& backends,
                                         tflite::TensorType tensorType,
                                         int32_t batchSize,
@@ -609,69 +627,69 @@
     using namespace tflite;
 
     std::vector<char> modelBuffer = CreateUnidirectionalSequenceLstmTfLiteModel(tensorType,
-                                                          batchSize,
-                                                          timeSize,
-                                                          inputSize,
-                                                          outputSize,
-                                                          numUnits,
-                                                          hasInputToInputWeights,
-                                                          inputToInputWeights,
-                                                          inputToForgetWeights,
-                                                          inputToCellWeights,
-                                                          inputToOutputWeights,
-                                                          hasRecurrentToInputWeights,
-                                                          recurrentToInputWeights,
-                                                          recurrentToForgetWeights,
-                                                          recurrentToCellWeights,
-                                                          recurrentToOutputWeights,
-                                                          hasCellToInputWeights,
-                                                          cellToInputWeights,
-                                                          hasCellToForgetWeights,
-                                                          cellToForgetWeights,
-                                                          hasCellToOutputWeights,
-                                                          cellToOutputWeights,
-                                                          hasInputGateBias,
-                                                          inputGateBias,
-                                                          forgetGateBias,
-                                                          cellBias,
-                                                          outputGateBias,
-                                                          hasProjectionWeights,
-                                                          projectionWeights,
-                                                          hasProjectionBias,
-                                                          projectionBias,
-                                                          hasInputLayerNormWeights,
-                                                          inputLayerNormWeights,
-                                                          hasForgetLayerNormWeights,
-                                                          forgetLayerNormWeights,
-                                                          hasCellLayerNormWeights,
-                                                          cellLayerNormWeights,
-                                                          hasOutputLayerNormWeights,
-                                                          outputLayerNormWeights,
-                                                          activationFunction,
-                                                          clippingThresCell,
-                                                          clippingThresProj,
-                                                          isTimeMajor,
-                                                          quantScale);
+                                                                                batchSize,
+                                                                                timeSize,
+                                                                                inputSize,
+                                                                                outputSize,
+                                                                                numUnits,
+                                                                                hasInputToInputWeights,
+                                                                                inputToInputWeights,
+                                                                                inputToForgetWeights,
+                                                                                inputToCellWeights,
+                                                                                inputToOutputWeights,
+                                                                                hasRecurrentToInputWeights,
+                                                                                recurrentToInputWeights,
+                                                                                recurrentToForgetWeights,
+                                                                                recurrentToCellWeights,
+                                                                                recurrentToOutputWeights,
+                                                                                hasCellToInputWeights,
+                                                                                cellToInputWeights,
+                                                                                hasCellToForgetWeights,
+                                                                                cellToForgetWeights,
+                                                                                hasCellToOutputWeights,
+                                                                                cellToOutputWeights,
+                                                                                hasInputGateBias,
+                                                                                inputGateBias,
+                                                                                forgetGateBias,
+                                                                                cellBias,
+                                                                                outputGateBias,
+                                                                                hasProjectionWeights,
+                                                                                projectionWeights,
+                                                                                hasProjectionBias,
+                                                                                projectionBias,
+                                                                                hasInputLayerNormWeights,
+                                                                                inputLayerNormWeights,
+                                                                                hasForgetLayerNormWeights,
+                                                                                forgetLayerNormWeights,
+                                                                                hasCellLayerNormWeights,
+                                                                                cellLayerNormWeights,
+                                                                                hasOutputLayerNormWeights,
+                                                                                outputLayerNormWeights,
+                                                                                activationFunction,
+                                                                                clippingThresCell,
+                                                                                clippingThresProj,
+                                                                                isTimeMajor,
+                                                                                quantScale);
 
     const Model* tfLiteModel = GetModel(modelBuffer.data());
     // Create TfLite Interpreters
     std::unique_ptr<Interpreter> armnnDelegateInterpreter;
     CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&armnnDelegateInterpreter) == kTfLiteOk);
+              (&armnnDelegateInterpreter) == kTfLiteOk);
     CHECK(armnnDelegateInterpreter != nullptr);
     CHECK(armnnDelegateInterpreter->AllocateTensors() == kTfLiteOk);
 
     std::unique_ptr<Interpreter> tfLiteInterpreter;
     CHECK(InterpreterBuilder(tfLiteModel, ::tflite::ops::builtin::BuiltinOpResolver())
-                  (&tfLiteInterpreter) == kTfLiteOk);
+              (&tfLiteInterpreter) == kTfLiteOk);
     CHECK(tfLiteInterpreter != nullptr);
     CHECK(tfLiteInterpreter->AllocateTensors() == kTfLiteOk);
 
     // Create the ArmNN Delegate
     armnnDelegate::DelegateOptions delegateOptions(backends);
     std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
-    theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
-                     armnnDelegate::TfLiteArmnnDelegateDelete);
+                                   theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
+                                                    armnnDelegate::TfLiteArmnnDelegateDelete);
     CHECK(theArmnnDelegate != nullptr);
     // Modify armnnDelegateInterpreter to use armnnDelegate
     CHECK(armnnDelegateInterpreter->ModifyGraphWithDelegate(theArmnnDelegate.get()) == kTfLiteOk);
@@ -684,7 +702,7 @@
         tfLiteDelageInputData[i] = inputValues[i];
     }
 
-    auto armnnDelegateInputId = armnnDelegateInterpreter->inputs()[0];
+    auto armnnDelegateInputId   = armnnDelegateInterpreter->inputs()[0];
     auto armnnDelegateInputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateInputId);
     for (unsigned int i = 0; i < inputValues.size(); ++i)
     {
@@ -696,10 +714,10 @@
     CHECK(armnnDelegateInterpreter->Invoke() == kTfLiteOk);
 
     // Compare output data
-    auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[0];
+    auto tfLiteDelegateOutputId   = tfLiteInterpreter->outputs()[0];
     auto tfLiteDelagateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
-    auto armnnDelegateOutputId = armnnDelegateInterpreter->outputs()[0];
-    auto armnnDelegateOutputData = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
+    auto armnnDelegateOutputId    = armnnDelegateInterpreter->outputs()[0];
+    auto armnnDelegateOutputData  = armnnDelegateInterpreter->typed_tensor<float>(armnnDelegateOutputId);
 
     if (tensorType == ::tflite::TensorType_INT8)
     {
@@ -713,8 +731,10 @@
     }
     else
     {
-        armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData, expectedOutputValues.size());
-        armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData, expectedOutputValues.size());
+        armnnDelegate::CompareData(expectedOutputValues.data(), armnnDelegateOutputData,
+                                   expectedOutputValues.size());
+        armnnDelegate::CompareData(expectedOutputValues.data(), tfLiteDelagateOutputData,
+                                   expectedOutputValues.size());
         armnnDelegate::CompareData(tfLiteDelagateOutputData, armnnDelegateOutputData, expectedOutputValues.size());
     }
 }
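
For reference, the flow that the re-indented hunks above reformat (the logic itself is unchanged): the helper builds one stock TfLite interpreter and one Arm NN delegate-backed interpreter from the same flatbuffer model, feeds both the same input, invokes both, and compares the outputs. A condensed sketch of that flow follows; RunBothInterpreters is a hypothetical name, error handling (the CHECK calls in the tests) and the INT8 path are elided, and the comparison step is reduced to a comment:

    #include <tensorflow/lite/interpreter.h>
    #include <tensorflow/lite/kernels/register.h>
    #include <tensorflow/lite/schema/schema_generated.h>

    #include <armnn/BackendId.hpp>
    #include <armnn_delegate.hpp>

    #include <algorithm>
    #include <memory>
    #include <vector>

    // Sketch only: run the same model buffer through a reference TfLite
    // interpreter and through one whose graph uses the Arm NN delegate.
    void RunBothInterpreters(std::vector<char>& modelBuffer,
                             std::vector<armnn::BackendId>& backends,
                             const std::vector<float>& inputValues)
    {
        const tflite::Model* tfLiteModel = tflite::GetModel(modelBuffer.data());

        std::unique_ptr<tflite::Interpreter> reference;
        tflite::InterpreterBuilder(tfLiteModel,
                                   tflite::ops::builtin::BuiltinOpResolver())(&reference);
        reference->AllocateTensors();

        std::unique_ptr<tflite::Interpreter> delegated;
        tflite::InterpreterBuilder(tfLiteModel,
                                   tflite::ops::builtin::BuiltinOpResolver())(&delegated);
        delegated->AllocateTensors();

        // Only the second interpreter is modified to use the Arm NN delegate.
        armnnDelegate::DelegateOptions delegateOptions(backends);
        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);
        delegated->ModifyGraphWithDelegate(theArmnnDelegate.get());

        // Same input into both, then invoke. Outputs are read back with
        // typed_tensor on outputs()[0] and compared element-wise
        // (armnnDelegate::CompareData in the tests).
        for (tflite::Interpreter* interpreter : { reference.get(), delegated.get() })
        {
            float* inputData = interpreter->typed_tensor<float>(interpreter->inputs()[0]);
            std::copy(inputValues.begin(), inputValues.end(), inputData);
            interpreter->Invoke();
        }
    }
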
diff --git a/delegate/src/test/UnpackTestHelper.hpp b/delegate/src/test/UnpackTestHelper.hpp
index 8487134..0e12d72 100644
--- a/delegate/src/test/UnpackTestHelper.hpp
+++ b/delegate/src/test/UnpackTestHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -36,7 +36,8 @@
     flatbuffers::FlatBufferBuilder flatBufferBuilder;
 
     std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
-    buffers.push_back(CreateBuffer(flatBufferBuilder, flatBufferBuilder.CreateVector({})));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
+    buffers.push_back(CreateBuffer(flatBufferBuilder));
 
     auto quantizationParameters =
         CreateQuantizationParameters(flatBufferBuilder,
@@ -57,7 +59,7 @@
                               flatBufferBuilder.CreateVector<int32_t>(inputTensorShape.data(),
                                                                       inputTensorShape.size()),
                                                                       tensorType,
-                                                                      0,
+                                                                      1,
                                                                       flatBufferBuilder.CreateString("input"),
                                                                       quantizationParameters);
 
@@ -67,10 +69,11 @@
                                   flatBufferBuilder.CreateVector<int32_t>(outputTensorShape.data(),
                                                                           outputTensorShape.size()),
                                   tensorType,
-                                  0,
+                                  (i + 2),
                                   flatBufferBuilder.CreateString("output" + std::to_string(i)),
                                   quantizationParameters);
 
+        buffers.push_back(CreateBuffer(flatBufferBuilder));
         operatorOutputs.push_back(i + 1);
         subgraphOutputs.push_back(i + 1);
     }
@@ -105,7 +108,7 @@
                     flatBufferBuilder.CreateVector(&operatorCode, 1),
                     flatBufferBuilder.CreateVector(&subgraph, 1),
                     modelDescription,
-                    flatBufferBuilder.CreateVector(buffers.data(), buffers.size()));
+                    flatBufferBuilder.CreateVector(buffers));
 
     flatBufferBuilder.Finish(flatbufferModel);
 
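
The UnpackTestHelper hunks above show the buffer convention this commit applies across the test helpers: buffer 0 is created empty and left reserved, the input tensor's buffer field points at index 1, and output i points at index (i + 2), with every entry created through the builder-only CreateBuffer overload (the generated tflite::CreateBuffer defaults its data argument, so passing only the builder is valid). A minimal sketch of that convention in isolation; MakeEmptyBuffers is a hypothetical helper name:

    #include <tensorflow/lite/schema/schema_generated.h>

    #include <flatbuffers/flatbuffers.h>

    #include <cstddef>
    #include <vector>

    // Sketch: build the buffer table the way the updated helpers do.
    // Buffer 0 stays empty and reserved; the input tensor uses buffer 1;
    // output i uses buffer i + 2. The builder-only CreateBuffer overload
    // leaves each buffer's data field unset.
    std::vector<flatbuffers::Offset<tflite::Buffer>>
    MakeEmptyBuffers(flatbuffers::FlatBufferBuilder& flatBufferBuilder, size_t numOutputs)
    {
        std::vector<flatbuffers::Offset<tflite::Buffer>> buffers;
        buffers.push_back(tflite::CreateBuffer(flatBufferBuilder)); // buffer 0: reserved
        buffers.push_back(tflite::CreateBuffer(flatBufferBuilder)); // buffer 1: input tensor
        for (size_t i = 0; i < numOutputs; ++i)
        {
            buffers.push_back(tflite::CreateBuffer(flatBufferBuilder)); // buffer i + 2: output i
        }
        // Handed to CreateModel via flatBufferBuilder.CreateVector(buffers).
        return buffers;
    }

The tensor buffer fields in the hunks above (1 for the input, (i + 2) for output i) index into exactly this vector.
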
diff --git a/shim/sl/CMakeLists.txt b/shim/sl/CMakeLists.txt
index e54101c..a33c71f 100644
--- a/shim/sl/CMakeLists.txt
+++ b/shim/sl/CMakeLists.txt
@@ -1,11 +1,13 @@
 #
-# Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+# Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
 # SPDX-License-Identifier: MIT
 #
 
 cmake_minimum_required (VERSION 3.7.0)
 enable_language(ASM)
 project(armnn_support_library)
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
 
 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -std=c++17 -Wall -fexceptions -Werror -Wno-unused-parameter -Wno-unused-private-field -Wno-unused-variable -Wno-attributes -Wno-format-security -Wno-extern-c-compat -Wno-invalid-partial-specialization -Wno-unneeded-internal-declaration -Wno-unused-function -DNN_COMPATIBILITY_LIBRARY_BUILD -DNN_DEBUGGABLE")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden -DOPENSSL_SMALL -DBORINGSSL_ANDROID_SYSTEM -DBORINGSSL_SHARED_LIBRARY -DBORINGSSL_IMPLEMENTATION")