IVGCVSW-6989 "Merged experimental/armnn_shim_sl"

* Updated Serializer CMakeLists.txt to build armnnSerializerObj
* Added constant tensors as input support to SL

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: I22f6cf50147d99a01f7fe70d7446b114a4c57af3
diff --git a/Android.mk b/Android.mk
index 7f6cb7a..4262246 100644
--- a/Android.mk
+++ b/Android.mk
@@ -5,6 +5,14 @@
 
 LOCAL_PATH := $(call my-dir)
 
+ARMNN_ANDROID_MK_ENABLED := 1
+
+ifeq ($(ARMNN_ANDROID_MK_ENABLE),0)
+ARMNN_ANDROID_MK_ENABLED := 0
+endif
+
+ifeq ($(ARMNN_ANDROID_MK_ENABLED),1)
+
 # Configure these paths if you move the source or Khronos headers
 ARMNN_GENERATED_HEADER_PATH := $(LOCAL_PATH)/generated
 OPENCL_HEADER_PATH := $(LOCAL_PATH)/../clframework/include
@@ -523,3 +531,5 @@
 endif
 
 include $(BUILD_EXECUTABLE)
+
+endif # ARMNN_ANDROID_MK_ENABLED
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 208bbf0..0b535b2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -116,7 +116,7 @@
     src/armnnUtils/Transpose.cpp
     )
 
-add_library_ex(armnnUtils STATIC ${armnnUtils_sources})
+add_library_ex(armnnUtils OBJECT ${armnnUtils_sources})
 target_include_directories(armnnUtils PRIVATE src/backends)
 
 if(BUILD_ONNX_PARSER)
diff --git a/shim/Android.bp b/shim/Android.bp
new file mode 100644
index 0000000..7e0d62a
--- /dev/null
+++ b/shim/Android.bp
@@ -0,0 +1,97 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+
+////////////////////////////////////////////
+//                                        //
+//           shim service                 //
+//                                        //
+////////////////////////////////////////////
+
+cc_prebuilt_library_shared {
+    name: "libarmnn_support_library",
+    check_elf_files: false,
+    shared_libs: [
+        "libbase",
+        "libcutils",
+        "liblog",
+        "libnativewindow",
+    ],
+    proprietary: true,
+    vendor: true,
+    // libnativewindow versioning trips this check; see b/181227567 for the fix
+    allow_undefined_symbols: true,
+    target: {
+        android_x86_64: {
+            srcs: ["./sl/build/libarmnn_support_library.so"],
+        },
+        android_x86: {
+            srcs: ["./sl/build/libarmnn_support_library.so"],
+        },
+        android_arm64: {
+            srcs: ["./sl/build/libarmnn_support_library.so"],
+        },
+        android_arm: {
+            srcs: ["./sl/build/libarmnn_support_library.so"],
+        },
+    },
+    apex_available: ["//apex_available:vendor"],
+}
+
+cc_defaults {
+    name: "NeuralNetworksShimArmnnDriverAidl_defaults",
+    defaults: ["neuralnetworks_defaults"],
+    header_libs: [
+        "libneuralnetworks_headers",
+    ],
+    cflags: [
+        "-DNN_COMPATIBILITY_LIBRARY_BUILD",
+    ],
+    static_libs: [
+        "android.hardware.common-V2-ndk_platform",
+        "android.hardware.graphics.common-V2-ndk_platform",
+        "android.hardware.neuralnetworks-V1-ndk_platform",
+        "libaidlcommonsupport",
+        "libarect",
+        "libcutils",
+        "libneuralnetworks_shim_static",
+        "neuralnetworks_supportlibrary_loader",
+        "neuralnetworks_types",
+        "libneuralnetworks_common",
+        "neuralnetworks_utils_hal_aidl",
+        "neuralnetworks_utils_hal_common",
+    ],
+    shared_libs: [
+        "android.hidl.allocator@1.0",
+        "android.hidl.memory@1.0",
+        "libbase",
+        "libhidltransport", 
+        "libbinder_ndk",
+        "libhidlbase",
+        "libhidlmemory",
+        "liblog",
+        "libnativewindow",
+        "libutils",
+        "libarmnn_support_library",
+    ],
+
+}
+
+cc_defaults {
+    name: "NeuralNetworksShimArmnnDriverAidl_server_defaults",
+    defaults: ["NeuralNetworksShimArmnnDriverAidl_defaults"],
+    relative_install_path: "hw",
+    proprietary: true,
+}
+
+
+cc_binary {
+    name: "android.hardware.neuralnetworks-shim-service-armnn",
+    srcs: ["./shimservice.cpp"],
+    enabled: false,
+    defaults: ["NeuralNetworksShimArmnnDriverAidl_server_defaults"],
+    init_rc: ["./config/android.hardware.neuralnetworks-shim-service-armnn.rc"],
+    vintf_fragments: ["./config/android.hardware.neuralnetworks-shim-service-armnn.xml"],
+}
diff --git a/shim/BuildGuideShimSupportLibrary.md b/shim/BuildGuideShimSupportLibrary.md
new file mode 100644
index 0000000..dc69f8f
--- /dev/null
+++ b/shim/BuildGuideShimSupportLibrary.md
@@ -0,0 +1,162 @@
+# How to use the Android NDK to build the Arm NN Shim and Support Library
+
+- [Introduction](#introduction)
+- [Prerequisites](#prerequisites)
+- [Download Arm NN](#download-arm-nn)
+- [Build Arm Compute Library](#build-arm-compute-library)
+- [Build Arm NN and Serializer](#build-arm-nn-and-serializer)
+- [Build Arm NN Support Library](#build-arm-nn-support-library)
+- [Build Arm NN Shim](#build-arm-nn-shim)
+
+
+## Introduction
+These are step-by-step instructions for building the Arm NN shim and support library for NNAPI.
+This work is currently in an experimental phase.
+
+## Prerequisites
+
+The following are required to build the Arm NN support library
+* Android NDK r20b
+  * Detailed setup can be found in [BuildGuideAndroidNDK.md](../BuildGuideAndroidNDK.md)
+* Flatbuffer version 1.12.0
+  * Detailed setup can be found in [BuildGuideCrossCompilation.md](../BuildGuideCrossCompilation.md)
+
+The following is required to build the Arm NN shim
+* AOSP Source (Android Open Source Project)
+  * Download the source from the [official website](https://source.android.com/setup/build/downloading)
+  * This guide will use release tag `android12-s1-release`
+
+
+Set environment variables
+```bash
+export WORKING_DIR=<path to where the Arm NN source code, clframework and aosp repos will be cloned>
+export AOSP_ROOT=<path to the root of Android tree where the shim will be built>
+export AOSP_MODULES_ROOT=<path to where AOSP modules will be cloned i.e. $WORKING_DIR/aosp>
+export ARMNN_BUILD_DIR=<path to the Arm NN build directory i.e. $WORKING_DIR/build>
+export NDK=<path to android-ndk-r20b>
+export NDK_TOOLCHAIN_ROOT=$NDK/toolchains/llvm/prebuilt/linux-x86_64
+export PATH=$NDK_TOOLCHAIN_ROOT/bin/:$PATH
+export FLATBUFFERS_ANDROID_BUILD=<path to flatbuffers target android build>
+export FLATBUFFERS_X86_BUILD=<path to flatbuffers host build-x86_64>
+```
+
+## Download Arm NN
+If you only wish to build the Support Library with the NDK, the Arm NN repo can be cloned into any folder.
+If you also wish to build the Arm NN Shim, the Arm NN repo must reside within the Android tree so that
+armnn/shim/Android.bp is picked up by the Soong (Android) build system, for example at $AOSP_ROOT/vendor/arm/armnn
+(a sketch of one way to arrange this follows the clone step below).
+
+
+* Clone Arm NN:
+  (Requires Git if not previously installed: `sudo apt install git`)
+
+```bash
+cd $WORKING_DIR
+git clone https://github.com/ARM-software/armnn.git
+```
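+
+If you also intend to build the shim, one way to place the repo inside the Android tree (a sketch, assuming the example vendor path above; a symlink to the existing clone works just as well as a second clone) is:
+
+```bash
+# Assumption: a symlink into the AOSP tree is sufficient for Soong to pick up armnn/shim/Android.bp;
+# a plain clone into the same location works too.
+mkdir -p $AOSP_ROOT/vendor/arm
+ln -s ${WORKING_DIR}/armnn $AOSP_ROOT/vendor/arm/armnn
+```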
+
+## Build Arm Compute Library
+
+Arm NN provides a script that downloads the version of Arm Compute Library that Arm NN was tested with:
+```bash
+${WORKING_DIR}/armnn/scripts/get_compute_library.sh
+```
+* Build the Arm Compute Library:
+  (Requires SCons if not previously installed: `sudo apt install scons`)
+```bash
+cd ${WORKING_DIR}/clframework
+
+scons arch=arm64-v8a \
+toolchain_prefix=aarch64-linux-android- \
+compiler_prefix=aarch64-linux-android29- \
+neon=1 opencl=1 \
+embed_kernels=1 \
+build_dir=android-arm64v8a \
+extra_cxx_flags="-Wno-parentheses-equality -Wno-missing-braces -fPIC" \
+Werror=0 embed_kernels=1 examples=0 \
+validation_tests=0 benchmark_tests=0 benchmark_examples=0 os=android -j16
+```
+
+## Build Arm NN and Serializer
+
+* Build Arm NN:
+  (Requires CMake if not previously installed: `sudo apt install cmake`)
+```bash
+cd $ARMNN_BUILD_DIR
+CXX=aarch64-linux-android29-clang++ \
+CC=aarch64-linux-android29-clang \
+CXX_FLAGS="-fPIE -fPIC" cmake ${WORKING_DIR}/armnn \
+-DCMAKE_ANDROID_NDK=$NDK \
+-DCMAKE_SYSTEM_NAME=Android \
+-DCMAKE_SYSTEM_VERSION=29 \
+-DCMAKE_ANDROID_ARCH_ABI=arm64-v8a \
+-DCMAKE_EXE_LINKER_FLAGS="-pie -llog -lz" \
+-DARMCOMPUTE_ROOT=$WORKING_DIR/clframework/ \
+-DARMCOMPUTE_BUILD_DIR=$WORKING_DIR/clframework/build/android-arm64v8a/ \
+-DARMCOMPUTENEON=1 -DARMCOMPUTECL=1 -DARMNNREF=1 \
+-DFLATBUFFERS_ROOT=$FLATBUFFERS_ANDROID_BUILD \
+-DFLATC_DIR=$FLATBUFFERS_X86_BUILD \
+-DBUILD_ARMNN_SERIALIZER=1 -DBUILD_GATORD_MOCK=0 -DBUILD_BASE_PIPE_SERVER=0
+```
+
+* Run the build:
+```bash
+make -j16
+```
+
+## Build Arm NN Support Library
+
+Building the support library requires building some AOSP libraries via the NDK.
+It should be possible to use $AOSP_ROOT instead of $AOSP_MODULES_ROOT.
+
+However, this example instead clones the necessary AOSP repos outside of the Android tree and applies some minor patches
+that were required to get them to build with the Android version used in this guide.
+
+```bash
+# Call a script which will clone the necessary AOSP repos (do not clone them into Android tree)
+${WORKING_DIR}/armnn/shim/sl/scripts/clone_aosp_libs.sh $AOSP_MODULES_ROOT
+
+# Modify the repos by applying patches
+${WORKING_DIR}/armnn/shim/sl/scripts/modify_aosp_libs.sh $AOSP_MODULES_ROOT
+
+# Build the Support Library
+CMARGS="$CMARGS \
+-DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake \
+-DANDROID_ABI=arm64-v8a \
+-DCMAKE_ANDROID_ARCH_ABI=arm64-v8a \
+-DCMAKE_ANDROID_NDK=$NDK \
+-DANDROID_PLATFORM=android-29 \
+-DAOSP_MODULES_ROOT=$AOSP_MODULES_ROOT \
+-DARMNN_SOURCE_DIR=$WORKING_DIR/armnn \
+-DArmnn_DIR=$ARMNN_BUILD_DIR "
+
+mkdir ${WORKING_DIR}/armnn/shim/sl/build
+cd ${WORKING_DIR}/armnn/shim/sl/build
+
+CXX=aarch64-linux-android29-clang++ \
+CC=aarch64-linux-android29-clang \
+cmake $CMARGS ../
+make
+```
+
+## Build Arm NN Shim
+
+By default the Arm NN shim service is not enabled in Android.bp.
+Enable it by setting `enabled: true` in the `android.hardware.neuralnetworks-shim-service-armnn` cc_binary module in armnn/shim/Android.bp.
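+
+One way to flip the flag (a sketch; it assumes the repo sits at $AOSP_ROOT/vendor/arm/armnn):
+
+```bash
+# Assumption: 'enabled: false' only occurs in the shim service module of this Android.bp.
+sed -i 's/enabled: false/enabled: true/' $AOSP_ROOT/vendor/arm/armnn/shim/Android.bp
+```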
+
+```bash
+cd $AOSP_ROOT
+source build/envsetup.sh
+lunch <device>-eng
+cd vendor/arm/armnn/shim
+export ARMNN_ANDROID_MK_ENABLE=0
+mm
+```
+
+The built libraries and manifest file can be found here:
+```
+$AOSP_ROOT/out/target/product/<device>/vendor/lib64/libarmnn_support_library.so
+$AOSP_ROOT/out/target/product/<device>/vendor/bin/hw/android.hardware.neuralnetworks-shim-service-armnn
+$AOSP_ROOT/out/target/product/<device>/vendor/etc/vintf/manifest/android.hardware.neuralnetworks-shim-service-armnn.xml
+```
+
+Currently the Arm NN libraries are shared libraries and therefore need to be pushed to the device:
+```
+$ARMNN_BUILD_DIR/libarmnn.so
+```
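+
+For example (a sketch; the exact destination directory is an assumption and may differ for your device image):
+
+```bash
+# Assumption: /vendor/lib64 is the right destination for your device image.
+adb root && adb remount
+adb push $ARMNN_BUILD_DIR/libarmnn.so /vendor/lib64/
+```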
diff --git a/shim/BuildGuideShimSupportLibrary.md.license b/shim/BuildGuideShimSupportLibrary.md.license
new file mode 100644
index 0000000..ac6973d
--- /dev/null
+++ b/shim/BuildGuideShimSupportLibrary.md.license
@@ -0,0 +1,4 @@
+#
+# Copyright © 2022 ARM Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
\ No newline at end of file
diff --git a/shim/config/android.hardware.neuralnetworks-shim-service-armnn.rc b/shim/config/android.hardware.neuralnetworks-shim-service-armnn.rc
new file mode 100644
index 0000000..55661e4
--- /dev/null
+++ b/shim/config/android.hardware.neuralnetworks-shim-service-armnn.rc
@@ -0,0 +1,4 @@
+service neuralnetworks_hal_service_armnn /vendor/bin/hw/android.hardware.neuralnetworks-shim-service-armnn
+    class hal
+    user system
+    group system
diff --git a/shim/config/android.hardware.neuralnetworks-shim-service-armnn.rc.license b/shim/config/android.hardware.neuralnetworks-shim-service-armnn.rc.license
new file mode 100644
index 0000000..37ef01d
--- /dev/null
+++ b/shim/config/android.hardware.neuralnetworks-shim-service-armnn.rc.license
@@ -0,0 +1,4 @@
+#
+# Copyright © 2022 ARM Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
diff --git a/shim/config/android.hardware.neuralnetworks-shim-service-armnn.xml b/shim/config/android.hardware.neuralnetworks-shim-service-armnn.xml
new file mode 100644
index 0000000..a1258a5
--- /dev/null
+++ b/shim/config/android.hardware.neuralnetworks-shim-service-armnn.xml
@@ -0,0 +1,10 @@
+<!--                                                                 -->
+<!-- Copyright © 2022 ARM Ltd and Contributors. All rights reserved. -->
+<!-- SPDX-License-Identifier: MIT                                    -->
+<!--                                                                 -->
+<manifest version="1.0" type="device">
+    <hal format="aidl">
+        <name>android.hardware.neuralnetworks</name>
+        <fqname>IDevice/arm-armnn-shim</fqname>
+    </hal>
+</manifest>
diff --git a/shim/shimservice.cpp b/shim/shimservice.cpp
new file mode 100644
index 0000000..44dc596
--- /dev/null
+++ b/shim/shimservice.cpp
@@ -0,0 +1,51 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#define LOG_TAG "ArmnnDriver"
+
+#include <android-base/logging.h>
+#include <android-base/scopeguard.h>
+#include <dlfcn.h>
+
+#include "NeuralNetworksShim.h"
+#include "SupportLibrarySymbols.h"
+
+#include <string>
+
+using namespace std;
+
+int main()
+{
+    /// The platform shim allows use of the armnn support library driver (arm-armnn-sl) to create a
+    /// binderized vendor service (arm-armnn-shim) that is started at device startup
+
+    NnApiSLDriverImpl* impl = ANeuralNetworks_getSLDriverImpl();
+    if (impl == nullptr)
+    {
+        LOG(ERROR) << "ArmnnDriver: ANeuralNetworks_getSLDriverImpl returned nullptr!!!";
+        return EXIT_FAILURE;
+    }
+
+    ANeuralNetworksShimDeviceInfo* deviceInfo;
+    ANeuralNetworksShimDeviceInfo_create(&deviceInfo,
+                                         /*deviceName=*/"arm-armnn-sl",
+                                         /*serviceName=*/"arm-armnn-shim");
+    const auto guardDeviceInfo = android::base::make_scope_guard(
+            [deviceInfo] { ANeuralNetworksShimDeviceInfo_free(deviceInfo); });
+
+    ANeuralNetworksShimRegistrationParams* params;
+    ANeuralNetworksShimRegistrationParams_create(impl, &params);
+    const auto guardParams = android::base::make_scope_guard(
+            [params] { ANeuralNetworksShimRegistrationParams_free(params); });
+    ANeuralNetworksShimRegistrationParams_addDeviceInfo(params, deviceInfo);
+    ANeuralNetworksShimRegistrationParams_setNumberOfListenerThreads(params, 15);
+    ANeuralNetworksShimRegistrationParams_registerAsLazyService(params, false);
+    ANeuralNetworksShimRegistrationParams_fallbackToMinimumSupportDevice(params, false);
+
+    auto result = ANeuralNetworksShim_registerSupportLibraryService(params);
+    LOG(ERROR) << "ArmnnDriver: ANeuralNetworksShim_registerSupportLibraryService returned error status: " << result;
+
+    return EXIT_FAILURE;
+}
diff --git a/shim/sl/CMakeLists.txt b/shim/sl/CMakeLists.txt
new file mode 100644
index 0000000..82dc444
--- /dev/null
+++ b/shim/sl/CMakeLists.txt
@@ -0,0 +1,518 @@
+#
+# Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+cmake_minimum_required (VERSION 3.7.0)
+enable_language(ASM)
+project(armnn_support_library)
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -std=c++17 -Wall -fexceptions -Werror -Wno-unused-parameter -Wno-unused-private-field -Wno-unused-variable -Wno-attributes -Wno-format-security -Wno-extern-c-compat -Wno-invalid-partial-specialization -Wno-unneeded-internal-declaration -Wno-unused-function -DNN_COMPATIBILITY_LIBRARY_BUILD -DNN_DEBUGGABLE")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden -DOPENSSL_SMALL -DBORINGSSL_ANDROID_SYSTEM -DBORINGSSL_SHARED_LIBRARY -DBORINGSSL_IMPLEMENTATION")
+
+set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+
+include(GNUInstallDirs)
+
+SET(libnnapi_support_include_directories)
+list(APPEND libnnapi_support_include_directories
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/types/include/nnapi/
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/types/include/
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/include
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/include
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/shim_and_sl/public
+        ${AOSP_MODULES_ROOT}/system/logging/liblog/include
+        ${AOSP_MODULES_ROOT}/system/libbase/include
+        ${AOSP_MODULES_ROOT}/frameworks/native/libs/nativewindow/include
+        ${AOSP_MODULES_ROOT}/system/core/libcutils/include
+        ${AOSP_MODULES_ROOT}/system/core/include
+        ${AOSP_MODULES_ROOT}/external/tensorflow
+        ${AOSP_MODULES_ROOT}/external/gemmlowp/
+        ${AOSP_MODULES_ROOT}/external/ruy/
+        ${AOSP_MODULES_ROOT}/external/eigen/
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/include
+        ${AOSP_MODULES_ROOT}/external/boringssl/include)
+
+include_directories(${libnnapi_support_include_directories})
+
+set(base_sources)
+list(APPEND base_sources
+        ${AOSP_MODULES_ROOT}/system/libbase/abi_compatibility.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/posix_strerror_r.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/chrono_utils.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/cmsg.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/file.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/hex.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/logging.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/mapped_file.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/parsebool.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/parsenetaddress.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/process.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/properties.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/stringprintf.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/strings.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/test_utils.cpp
+        ${AOSP_MODULES_ROOT}/system/libbase/threads.cpp)
+add_library(base STATIC ${base_sources})
+target_include_directories (base PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
+target_include_directories (base PUBLIC ${libnnapi_support_include_directories})
+
+file(GLOB TYPES_CL_SOURCE_FILES
+    ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/*Validation.cpp
+)
+
+set(neuralnetworks_types_cl_sources)
+list(APPEND neuralnetworks_types_cl_sources
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/types/src/DynamicCLDeps.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/types/src/SharedMemory.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/types/src/SharedMemoryAndroid.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/types/src/TypeUtils.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/types/src/OperationsValidationUtils.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/types/src/Types.cpp
+        ${TYPES_CL_SOURCE_FILES}
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/types/src/Validation.cpp)
+
+add_library(neuralnetworks_types_cl STATIC ${neuralnetworks_types_cl_sources})
+target_include_directories (neuralnetworks_types_cl PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
+target_include_directories (neuralnetworks_types_cl PUBLIC ${libnnapi_support_include_directories})
+
+set(neuralnetworks_common_cl_sources)
+list(APPEND neuralnetworks_common_cl_sources
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/BufferTracker.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/CpuExecutor.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/GraphDump.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/IndexedShapeWrapper.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/LegacyUtils.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/ModelUtils.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/MetaModel.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/OperationsExecutionUtils.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/types/src/OperationsUtils.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/TokenHasher.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/OperationResolver.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/ActivationExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/BatchMatmulExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/BidirectionalSequenceRNNExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/BroadcastExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/ChannelShuffleExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/ComparisonsExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/ConcatenationExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/Conv2DExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/DepthwiseConv2DExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/DequantizeExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/ElementwiseExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/EluExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/FillExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/FullyConnectedExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/GatherExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/GenerateProposalsExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/HeatmapMaxKeypointExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/InstanceNormalizationExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/L2NormalizationExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/LocalResponseNormalizationExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/LogSoftmaxExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/LogicalAndOrExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/LogicalNotExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/MirrorPadExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/NegExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/PackExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/PReluExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/PoolingExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/QLSTMExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/QuantizeExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/RankExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/ReduceExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/ReshapeExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/ReverseExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/ResizeImageOpsExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/RoiAlignExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/RoiPoolingExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/SelectExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/SliceExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/SoftmaxExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/SqueezeExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/StridedSliceExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/TopK_V2Execution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/TransposeExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/TransposeConv2DExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/UnidirectionalSequenceLSTMExecution.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/common/operations/UnidirectionalSequenceRNNExecution.cpp)
+
+add_library(neuralnetworks_common_cl STATIC ${neuralnetworks_common_cl_sources})
+target_include_directories (neuralnetworks_common_cl PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
+target_include_directories (neuralnetworks_common_cl PUBLIC ${libnnapi_support_include_directories})
+
+set(neuralnetworks_cl_sources)
+list(APPEND neuralnetworks_cl_sources
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/BurstBuilder.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/CompilationBuilder.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/ExecutionBuilder.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/ExecutionCallback.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/ExecutionPlan.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/ServerFlag.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/Manager.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/Memory.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/ModelArchHasher.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/ModelArgumentInfo.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/ModelBuilder.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/NeuralNetworks.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/SupportLibraryDiagnostic.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/Telemetry.cpp
+        ${AOSP_MODULES_ROOT}/packages/modules/NeuralNetworks/runtime/TypeManager.cpp)
+
+add_library(neuralnetworks_cl OBJECT ${neuralnetworks_cl_sources})
+target_include_directories (neuralnetworks_cl PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
+target_include_directories (neuralnetworks_cl PUBLIC ${libnnapi_support_include_directories})
+
+set(crypto_static_sources)
+list(APPEND crypto_static_sources
+        ${AOSP_MODULES_ROOT}/external/boringssl/err_data.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_bitstr.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_bool.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_d2i_fp.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_dup.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_enum.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_gentm.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_i2d_fp.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_int.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_mbstr.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_object.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_octet.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_print.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_strex.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_strnid.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_time.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_type.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_utctm.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/a_utf8.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/asn1_lib.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/asn1_par.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/asn_pack.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/f_int.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/f_string.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/tasn_dec.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/tasn_enc.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/tasn_fre.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/tasn_new.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/tasn_typ.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/tasn_utl.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/asn1/time_support.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/base64/base64.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bio/bio.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bio/bio_mem.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bio/connect.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bio/fd.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bio/file.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bio/hexdump.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bio/pair.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bio/printf.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bio/socket.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bio/socket_helper.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/blake2/blake2.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bn_extra/bn_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bn_extra/convert.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/buf/buf.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bytestring/asn1_compat.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bytestring/ber.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bytestring/cbb.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bytestring/cbs.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bytestring/unicode.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/bytestring/unicode.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/chacha/chacha.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/cipher_extra.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/derive_key.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_aesccm.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_aesctrhmac.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_aesgcmsiv.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_chacha20poly1305.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_des.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_null.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_rc2.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_rc4.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_tls.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/tls_cbc.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cmac/cmac.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/conf/conf.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_aarch64_apple.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_aarch64_fuchsia.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_aarch64_linux.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_aarch64_win.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_arm.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_arm_linux.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_intel.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_ppc64le.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/crypto.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/curve25519/curve25519.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/curve25519/spake25519.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/des/des.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/dh_extra/dh_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/dh_extra/params.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/digest_extra/digest_extra.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/dsa/dsa.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/dsa/dsa_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/ec_extra/ec_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/ec_extra/ec_derive.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_aesccm.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_aesctrhmac.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_aesgcmsiv.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_chacha20poly1305.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_des.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_null.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_rc2.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_rc4.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/e_tls.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cipher_extra/tls_cbc.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cmac/cmac.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/conf/conf.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_aarch64_apple.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_aarch64_fuchsia.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_aarch64_linux.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_aarch64_win.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_arm.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_arm_linux.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_intel.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/cpu_ppc64le.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/crypto.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/curve25519/curve25519.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/curve25519/spake25519.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/des/des.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/dh_extra/dh_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/dh_extra/params.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/digest_extra/digest_extra.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/dsa/dsa.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/dsa/dsa_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/ec_extra/ec_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/ec_extra/ec_derive.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/ec_extra/hash_to_curve.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/ecdh_extra/ecdh_extra.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/ecdsa_extra/ecdsa_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/engine/engine.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/err/err.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/digestsign.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/evp.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/evp_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/evp_ctx.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/p_dsa_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/p_ec.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/p_ec_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/p_ed25519.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/p_ed25519_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/p_rsa.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/p_rsa_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/p_x25519.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/p_x25519_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/pbkdf.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/print.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/scrypt.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/evp/sign.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/ex_data.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/fipsmodule/fips_shared_support.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/hkdf/hkdf.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/hpke/hpke.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/hrss/hrss.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/lhash/lhash.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/mem.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/obj/obj.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/obj/obj_xref.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pem/pem_all.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pem/pem_info.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pem/pem_lib.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pem/pem_oth.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pem/pem_pk8.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pem/pem_pkey.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pem/pem_x509.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pem/pem_xaux.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pkcs7/pkcs7.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pkcs7/pkcs7_x509.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pkcs8/p5_pbev2.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pkcs8/pkcs8.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pkcs8/pkcs8_x509.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/poly1305/poly1305.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/poly1305/poly1305_arm.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/poly1305/poly1305_vec.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/pool/pool.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/rand_extra/deterministic.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/rand_extra/forkunsafe.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/rand_extra/fuchsia.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/rand_extra/passive.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/rand_extra/rand_extra.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/rand_extra/windows.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/rc4/rc4.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/refcount_c11.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/refcount_lock.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/rsa_extra/rsa_asn1.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/rsa_extra/rsa_print.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/siphash/siphash.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/stack/stack.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/thread.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/thread_none.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/thread_pthread.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/thread_win.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/trust_token/pmbtoken.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/trust_token/trust_token.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/trust_token/voprf.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/a_digest.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/a_sign.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/a_verify.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/algorithm.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/asn1_gen.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/by_dir.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/by_file.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/i2d_pr.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/name_print.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/rsa_pss.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/t_crl.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/t_req.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/t_x509.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/t_x509a.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_att.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_cmp.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_d2.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_def.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_ext.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_lu.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_obj.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_req.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_set.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_trs.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_txt.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_v3.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_vfy.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509_vpm.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509cset.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509name.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509rset.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x509spki.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_algor.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_all.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_attrib.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_crl.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_exten.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_info.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_name.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_pkey.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_pubkey.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_req.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_sig.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_spki.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_val.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_x509.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509/x_x509a.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/pcy_cache.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/pcy_data.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/pcy_lib.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/pcy_map.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/pcy_node.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/pcy_tree.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_akey.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_akeya.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_alt.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_bcons.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_bitst.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_conf.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_cpols.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_crld.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_enum.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_extku.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_genn.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_ia5.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_info.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_int.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_lib.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_ncons.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_ocsp.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_pci.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_pcia.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_pcons.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_pmaps.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_prn.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_purp.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_skey.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/x509v3/v3_utl.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/src/crypto/fipsmodule/bcm.c
+        ${AOSP_MODULES_ROOT}/external/boringssl/linux-aarch64/crypto/chacha/chacha-armv8.S
+        ${AOSP_MODULES_ROOT}/external/boringssl/linux-aarch64/crypto/fipsmodule/aesv8-armx64.S
+        ${AOSP_MODULES_ROOT}/external/boringssl/linux-aarch64/crypto/fipsmodule/armv8-mont.S
+        ${AOSP_MODULES_ROOT}/external/boringssl/linux-aarch64/crypto/fipsmodule/ghash-neon-armv8.S
+        ${AOSP_MODULES_ROOT}/external/boringssl/linux-aarch64/crypto/fipsmodule/ghashv8-armx64.S
+        ${AOSP_MODULES_ROOT}/external/boringssl/linux-aarch64/crypto/fipsmodule/sha1-armv8.S
+        ${AOSP_MODULES_ROOT}/external/boringssl/linux-aarch64/crypto/fipsmodule/sha256-armv8.S
+        ${AOSP_MODULES_ROOT}/external/boringssl/linux-aarch64/crypto/fipsmodule/sha512-armv8.S
+        ${AOSP_MODULES_ROOT}/external/boringssl/linux-aarch64/crypto/fipsmodule/vpaes-armv8.S
+        )
+
+add_library(crypto_static STATIC ${crypto_static_sources})
+target_include_directories (crypto_static PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
+target_include_directories (crypto_static PUBLIC ${libnnapi_support_include_directories})
+
+message(STATUS "AOSP_MODULES_ROOT: ${AOSP_MODULES_ROOT}")
+
+# Add Armnn as a Dependency
+message(STATUS "ARMNN_SOURCE_DIR: ${ARMNN_SOURCE_DIR}")
+message(STATUS "Armnn_DIR: ${Armnn_DIR}")
+
+set(Armnn_DIR "${Armnn_DIR}")
+if(NOT ARMNN_SUB_PROJECT)
+    find_package(Armnn REQUIRED CONFIG HINTS ${Armnn_DIR})
+endif()
+
+add_library(thirdparty_headers INTERFACE)
+target_include_directories(thirdparty_headers INTERFACE $<BUILD_INTERFACE:${ARMNN_SOURCE_DIR}/third-party>
+        $<INSTALL_INTERFACE:include/thirdparty_headers>)
+
+add_library(profiling_library_headers INTERFACE)
+target_include_directories(profiling_library_headers INTERFACE $<BUILD_INTERFACE:${ARMNN_SOURCE_DIR}/profiling>
+        $<INSTALL_INTERFACE:include/profiling_library_headers>)
+
+set(armnn_support_library_sources)
+list(APPEND armnn_support_library_sources
+        canonical/ArmnnPreparedModel.cpp
+        canonical/ArmnnPreparedModel.hpp
+        canonical/ArmnnDevice.cpp
+        canonical/ArmnnDevice.hpp
+        canonical/ArmnnDriver.hpp
+        canonical/ArmnnDriverImpl.cpp
+        canonical/ArmnnDriverImpl.hpp
+        canonical/CacheDataHandler.cpp
+        canonical/CacheDataHandler.hpp
+        canonical/CanonicalUtils.cpp
+        canonical/CanonicalUtils.hpp
+        canonical/ConversionUtils.cpp
+        canonical/ConversionUtils.hpp
+        canonical/Converter.cpp
+        canonical/Converter.hpp
+        canonical/DriverOptions.cpp
+        canonical/DriverOptions.hpp
+        canonical/ModelToINetworkTransformer.cpp
+        canonical/ModelToINetworkTransformer.hpp
+        canonical/SystemPropertiesUtils.hpp
+        support_library_service.cpp)
+
+list(APPEND armnn_support_library_sources "$<TARGET_OBJECTS:Armnn::armnnUtils>")
+list(APPEND armnn_support_library_sources "$<TARGET_OBJECTS:Armnn::armnnSerializerObj>")
+add_library(armnn_support_library SHARED ${armnn_support_library_sources})
+
+target_link_libraries(armnn_support_library PUBLIC Armnn::Armnn)
+target_link_libraries(armnn_support_library PUBLIC profiling_library_headers)
+target_link_libraries(armnn_support_library PRIVATE thirdparty_headers)
+
+target_link_libraries(armnn_support_library PRIVATE neuralnetworks_types_cl)
+target_link_libraries(armnn_support_library PRIVATE neuralnetworks_common_cl)
+target_link_libraries(armnn_support_library PRIVATE neuralnetworks_cl)
+target_link_libraries(armnn_support_library PRIVATE -Wl,-Bsymbolic crypto_static)
+target_link_libraries(armnn_support_library PRIVATE base)
+
+target_include_directories(armnn_support_library PUBLIC ${libnnapi_support_include_directories})
+
+target_link_libraries(armnn_support_library PUBLIC -Wl,-undefined -Wl,dynamic_lookup)
+
+# find the liblog
+find_library(log-lib log)
+target_link_libraries(armnn_support_library PUBLIC ${log-lib})
+
+# find the libnativewindow
+find_library(nativewindow-lib nativewindow)
+target_link_libraries(armnn_support_library PUBLIC ${nativewindow-lib})
+
+####################################################
diff --git a/shim/sl/README.md b/shim/sl/README.md
new file mode 100644
index 0000000..4650965
--- /dev/null
+++ b/shim/sl/README.md
@@ -0,0 +1,38 @@
+# Arm NN Support Library Neural Networks driver
+
+This directory contains the Arm NN Support Library for the Android Neural Networks API.
+
+# Passing parameters to the support library runtime.
+
+The support library inherits its parameters from the Arm NN Android Neural Networks driver. Parameters are passed to it through the environment variable ARMNN_SL_OPTIONS. A full list of parameters is available in ./canonical/DriverOptions.cpp.
+
+# Sample usage
+
+## Running NeuralNetworksSupportLibraryTest
+
+This test suite takes as its first argument the path to a shared object implementation of the support library. Any library dependencies should be resolvable through the LD_LIBRARY_PATH mechanism. Setting ARMNN_SL_OPTIONS will pass parameters to the Arm NN Support Library Neural Networks driver.
+
+Here we assume that Bash is the current shell and specify "-v" to enable verbose logging and "-c CpuAcc" to direct that the Neon(TM) accelerator be used.
+~~~
+ARMNN_SL_OPTIONS="-v -c CpuAcc" ./NeuralNetworksSupportLibraryTest ./libarmnn_support_library.so
+~~~
+
+## Running TfLite Benchmarking tool
+
+This tool's parameters are described [here](https://www.tensorflow.org/lite/performance/measurement). The support-library-specific parts are specifying the path to the library and ensuring that ARMNN_SL_OPTIONS is set in the environment.
+
+Here we assume that Bash is the current shell and specify "-v" to enable verbose logging, "-c GpuAcc" to direct that the GPU accelerator be used and "-f" to enable support for relaxed computation from Float32 to Float16.
+~~~
+ARMNN_SL_OPTIONS="-v -c GpuAcc -f" ./android_aarch64_benchmark_model --graph=./mymodel.tflite --num_threads=1 --use_nnapi=true --num_runs=1 --nnapi_support_library_path=./libarmnn_support_library.so --nnapi_accelerator_name=arm-armnn-sl
+~~~
+
+### License
+
+The Arm NN Support Library Neural Networks driver is provided under the [MIT](https://spdx.org/licenses/MIT.html) license.
+See [LICENSE](LICENSE) for more information. Contributions to this project are accepted under the same license.
+
+Individual files contain the following tag instead of the full license text.
+
+    SPDX-License-Identifier: MIT
+
+This enables machine processing of license information based on the SPDX License Identifiers that are available here: http://spdx.org/licenses/
diff --git a/shim/sl/README.md.license b/shim/sl/README.md.license
new file mode 100644
index 0000000..37ef01d
--- /dev/null
+++ b/shim/sl/README.md.license
@@ -0,0 +1,4 @@
+#
+# Copyright © 2022 ARM Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
diff --git a/shim/sl/canonical/ArmnnDevice.cpp b/shim/sl/canonical/ArmnnDevice.cpp
new file mode 100644
index 0000000..c404822
--- /dev/null
+++ b/shim/sl/canonical/ArmnnDevice.cpp
@@ -0,0 +1,147 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#define LOG_TAG "arm-armnn-sl"
+
+#include "ArmnnDevice.hpp"
+
+#include <LegacyUtils.h>
+#include <OperationsUtils.h>
+
+#include <log/log.h>
+
+#include <memory>
+#include <string>
+
+#ifdef __ANDROID__
+#include <android/log.h>
+#endif
+
+namespace
+{
+
+std::string GetBackendString(const armnn_driver::DriverOptions& options)
+{
+    std::stringstream backends;
+    for (auto&& b : options.GetBackends())
+    {
+        backends << b << " ";
+    }
+    return backends.str();
+}
+
+} // anonymous namespace
+
+namespace armnn_driver
+{
+
+using namespace android::nn;
+
+ArmnnDevice::ArmnnDevice(DriverOptions options)
+    : m_Runtime(nullptr, nullptr)
+    , m_ClTunedParameters(nullptr)
+    , m_Options(std::move(options))
+{
+    // First check whether the DriverOptions are happy.
+    // Note: options has already been moved into m_Options in the initialiser list, so query the member.
+    if (m_Options.ShouldExit())
+    {
+        // Is this a good or bad exit?
+        if (m_Options.GetExitCode() != EXIT_SUCCESS)
+        {
+            throw armnn::InvalidArgumentException("ArmnnDevice: Insufficient or illegal options specified.");
+        }
+        else
+        {
+            throw armnn::InvalidArgumentException("ArmnnDevice: Nothing to do.");
+        }
+    }
+
+    initVLogMask();
+    VLOG(DRIVER) << "ArmnnDevice::ArmnnDevice()";
+
+#ifdef __ANDROID__
+    __android_log_print(ANDROID_LOG_DEBUG, "ARMNN_SL", "ArmnnDevice::ArmnnDevice()");
+#endif
+
+    armnn::ConfigureLogging(false, m_Options.IsVerboseLoggingEnabled(), armnn::LogSeverity::Trace);
+    if (m_Options.IsVerboseLoggingEnabled())
+    {
+        SetMinimumLogSeverity(android::base::VERBOSE);
+    }
+    else
+    {
+        SetMinimumLogSeverity(android::base::INFO);
+    }
+
+    armnn::IRuntime::CreationOptions runtimeOptions;
+
+#if defined(ARMCOMPUTECL_ENABLED)
+    try
+    {
+        if (!m_Options.GetClTunedParametersFile().empty())
+        {
+            m_ClTunedParameters = armnn::IGpuAccTunedParameters::Create(m_Options.GetClTunedParametersMode(),
+                                                                        m_Options.GetClTuningLevel());
+            try
+            {
+                m_ClTunedParameters->Load(m_Options.GetClTunedParametersFile().c_str());
+            }
+            catch (std::exception& error)
+            {
+                // This is only a warning because the file won't exist the first time you are generating it.
+                VLOG(DRIVER) << "ArmnnDevice: Failed to load CL tuned parameters file "
+                      << m_Options.GetClTunedParametersFile().c_str() << " : " <<  error.what());
+            }
+            runtimeOptions.m_GpuAccTunedParameters = m_ClTunedParameters;
+        }
+    }
+    catch (const armnn::ClRuntimeUnavailableException& error)
+    {
+        VLOG(DRIVER) <<  "ArmnnDevice: Failed to setup CL runtime: %s. Device will be unavailable." << error.what();
+    }
+    catch (std::exception& error)
+    {
+        VLOG(DRIVER) <<  "ArmnnDevice: Unknown exception: %s. Device will be unavailable." << error.what();
+    }
+#endif
+    runtimeOptions.m_EnableGpuProfiling = m_Options.IsGpuProfilingEnabled();
+    m_Runtime = armnn::IRuntime::Create(runtimeOptions);
+
+    std::vector<armnn::BackendId> backends;
+
+    if (m_Runtime)
+    {
+        const armnn::BackendIdSet supportedDevices = m_Runtime->GetDeviceSpec().GetSupportedBackends();
+        for (auto &backend : m_Options.GetBackends())
+        {
+            if (std::find(supportedDevices.cbegin(), supportedDevices.cend(), backend) == supportedDevices.cend())
+            {
+                VLOG(DRIVER) << "ArmnnDevice: Requested unknown backend " << backend.Get().c_str();
+            }
+            else
+            {
+                backends.push_back(backend);
+            }
+        }
+    }
+
+    if (backends.empty())
+    {
+        // No known backend specified
+        throw armnn::InvalidArgumentException("ArmnnDevice: No known backend specified.");
+    }
+
+    m_Options.SetBackends(backends);
+    VLOG(DRIVER) << "ArmnnDevice: Created device with the following backends: " << GetBackendString(m_Options).c_str();
+
+#ifdef __ANDROID__
+    __android_log_print(ANDROID_LOG_DEBUG,
+                        "ARMNN_SL",
+                        "ArmnnDevice: Created device with the following backends: %s",
+                        GetBackendString(m_Options).c_str());
+#endif
+}
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/ArmnnDevice.hpp b/shim/sl/canonical/ArmnnDevice.hpp
new file mode 100644
index 0000000..9597bfc
--- /dev/null
+++ b/shim/sl/canonical/ArmnnDevice.hpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "DriverOptions.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+namespace armnn_driver
+{
+
+class ArmnnDevice
+{
+
+protected:
+    ArmnnDevice(DriverOptions options);
+    virtual ~ArmnnDevice() {}
+
+protected:
+    armnn::IRuntimePtr m_Runtime;
+    armnn::IGpuAccTunedParametersPtr m_ClTunedParameters;
+    DriverOptions m_Options;
+};
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/ArmnnDriver.hpp b/shim/sl/canonical/ArmnnDriver.hpp
new file mode 100644
index 0000000..877faa6
--- /dev/null
+++ b/shim/sl/canonical/ArmnnDriver.hpp
@@ -0,0 +1,247 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <android-base/logging.h>
+#include <nnapi/IBuffer.h>
+#include <nnapi/IDevice.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/OperandTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+
+#include "ArmnnDevice.hpp"
+#include "ArmnnDriverImpl.hpp"
+#include "Converter.hpp"
+
+#include "ArmnnDriverImpl.hpp"
+#include "ModelToINetworkTransformer.hpp"
+
+#include <log/log.h>
+namespace armnn_driver
+{
+
+//using namespace android::nn;
+
+class ArmnnDriver : public ArmnnDevice, public IDevice
+{
+public:
+
+    ArmnnDriver(DriverOptions options)
+        : ArmnnDevice(std::move(options))
+    {
+        VLOG(DRIVER) << "ArmnnDriver::ArmnnDriver()";
+    }
+    ~ArmnnDriver()
+    {
+        VLOG(DRIVER) << "ArmnnDriver::~ArmnnDriver()";
+        // Unload the networks
+        for (auto& netId : ArmnnDriverImpl::GetLoadedNetworks())
+        {
+            m_Runtime->UnloadNetwork(netId);
+        }
+        ArmnnDriverImpl::ClearNetworks();
+    }
+
+public:
+
+    const std::string& getName() const override
+    {
+        VLOG(DRIVER) << "ArmnnDriver::getName()";
+        static const std::string name = "arm-armnn-sl";
+        return name;
+    }
+
+    const std::string& getVersionString() const override
+    {
+        VLOG(DRIVER) << "ArmnnDriver::getVersionString()";
+        static const std::string versionString = "ArmNN";
+        return versionString;
+    }
+
+    Version getFeatureLevel() const override
+    {
+        VLOG(DRIVER) << "ArmnnDriver::getFeatureLevel()";
+        return kVersionFeatureLevel5;
+    }
+
+    DeviceType getType() const override
+    {
+        VLOG(DRIVER) << "ArmnnDriver::getType()";
+        return DeviceType::CPU;
+    }
+
+    const std::vector<Extension>& getSupportedExtensions() const override
+    {
+        VLOG(DRIVER) << "ArmnnDriver::getSupportedExtensions()";
+        static const std::vector<Extension> extensions = {};
+        return extensions;
+    }
+
+    const Capabilities& getCapabilities() const override
+    {
+        VLOG(DRIVER) << "ArmnnDriver::GetCapabilities()";
+        return ArmnnDriverImpl::GetCapabilities(m_Runtime);
+    }
+
+    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override
+    {
+        VLOG(DRIVER) << "ArmnnDriver::getNumberOfCacheFilesNeeded()";
+        unsigned int numberOfCachedModelFiles = 0;
+        for (auto& backend : m_Options.GetBackends())
+        {
+            numberOfCachedModelFiles += GetNumberOfCacheFiles(backend);
+            VLOG(DRIVER) << "ArmnnDriver::getNumberOfCacheFilesNeeded() = " << std::to_string(numberOfCachedModelFiles);
+        }
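+        // In addition to the per-backend model cache files, a single data cache file is requested for the serialized network.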
+        return std::make_pair(numberOfCachedModelFiles, 1ul);
+    }
+
+    GeneralResult<void> wait() const override
+    {
+        VLOG(DRIVER) << "ArmnnDriver::wait()";
+        return {};
+    }
+
+    GeneralResult<std::vector<bool>> getSupportedOperations(const Model& model) const override
+    {
+        VLOG(DRIVER) << "ArmnnDriver::getSupportedOperations()";
+
+        std::stringstream ss;
+        ss << "ArmnnDriverImpl::getSupportedOperations()";
+        std::string fileName;
+        std::string timestamp;
+        if (!m_Options.GetRequestInputsAndOutputsDumpDir().empty())
+        {
+            ss << " : "
+               << m_Options.GetRequestInputsAndOutputsDumpDir()
+               << "/"
+               // << GetFileTimestamp()
+               << "_getSupportedOperations.txt";
+        }
+        VLOG(DRIVER) << ss.str().c_str();
+
+        if (!m_Options.GetRequestInputsAndOutputsDumpDir().empty())
+        {
+            //dump the marker file
+            std::ofstream fileStream;
+            fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
+            if (fileStream.good())
+            {
+                fileStream << timestamp << std::endl;
+                fileStream << timestamp << std::endl;
+            }
+            fileStream.close();
+        }
+
+        std::vector<bool> result;
+        if (!m_Runtime)
+        {
+            return NN_ERROR(ErrorStatus::DEVICE_UNAVAILABLE) << "Device Unavailable!";
+        }
+
+        // Run general model validation; if this doesn't pass we shouldn't analyse the model anyway.
+        if (const auto validationResult = validate(model); !validationResult.ok())
+        {
+            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Invalid Model!";
+        }
+
+        // Attempt to convert the model to an ArmNN input network (INetwork).
+        ModelToINetworkTransformer modelConverter(m_Options.GetBackends(),
+                                                  model,
+                                                  m_Options.GetForcedUnsupportedOperations());
+
+        if (modelConverter.GetConversionResult() != ConversionResult::Success
+            && modelConverter.GetConversionResult() != ConversionResult::UnsupportedFeature)
+        {
+            return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Conversion Error!";
+        }
+
+        // Check each operation if it was converted successfully and copy the flags
+        // into the result (vector<bool>) that we need to return to Android.
+        result.reserve(model.main.operations.size());
+        for (uint32_t operationIdx = 0; operationIdx < model.main.operations.size(); ++operationIdx)
+        {
+            bool operationSupported = modelConverter.IsOperationSupported(operationIdx);
+            result.push_back(operationSupported);
+        }
+
+        return result;
+    }
+
+    GeneralResult<SharedPreparedModel> prepareModel(const Model& model,
+        ExecutionPreference preference,
+        Priority priority,
+        OptionalTimePoint deadline,
+        const std::vector<SharedHandle>& modelCache,
+        const std::vector<SharedHandle>& dataCache,
+        const CacheToken& token,
+        const std::vector<android::nn::TokenValuePair>& hints,
+        const std::vector<android::nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override
+    {
+        VLOG(DRIVER) << "ArmnnDriver::prepareModel()";
+
+        // Validate arguments.
+        if (const auto result = validate(model); !result.ok()) {
+            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Invalid Model: " << result.error();
+        }
+        if (const auto result = validate(preference); !result.ok()) {
+            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT)
+                << "Invalid ExecutionPreference: " << result.error();
+        }
+        if (const auto result = validate(priority); !result.ok()) {
+            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Invalid Priority: " << result.error();
+        }
+
+        // Check if deadline has passed.
+        if (hasDeadlinePassed(deadline)) {
+            return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
+        }
+
+        return ArmnnDriverImpl::PrepareArmnnModel(m_Runtime,
+                                                  m_ClTunedParameters,
+                                                  m_Options,
+                                                  model,
+                                                  modelCache,
+                                                  dataCache,
+                                                  token,
+                                                  model.relaxComputationFloat32toFloat16 && m_Options.GetFp16Enabled(),
+                                                  priority);
+    }
+
+    GeneralResult<SharedPreparedModel> prepareModelFromCache(OptionalTimePoint deadline,
+                                                             const std::vector<SharedHandle>& modelCache,
+                                                             const std::vector<SharedHandle>& dataCache,
+                                                             const CacheToken& token) const override
+    {
+        VLOG(DRIVER) << "ArmnnDriver::prepareModelFromCache()";
+
+        // Check if deadline has passed.
+        if (hasDeadlinePassed(deadline)) {
+            return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
+        }
+
+        return ArmnnDriverImpl::PrepareArmnnModelFromCache(
+                     m_Runtime,
+                     m_ClTunedParameters,
+                     m_Options,
+                     modelCache,
+                     dataCache,
+                     token,
+                     m_Options.GetFp16Enabled());
+    }
+
+    GeneralResult<SharedBuffer> allocate(const BufferDesc&,
+                                         const std::vector<SharedPreparedModel>&,
+                                         const std::vector<BufferRole>&,
+                                         const std::vector<BufferRole>&) const override
+    {
+        VLOG(DRIVER) << "ArmnnDriver::allocate()";
+        return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "ArmnnDriver::allocate -- does not support allocate.";
+    }
+};
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/ArmnnDriverImpl.cpp b/shim/sl/canonical/ArmnnDriverImpl.cpp
new file mode 100644
index 0000000..3223d9e
--- /dev/null
+++ b/shim/sl/canonical/ArmnnDriverImpl.cpp
@@ -0,0 +1,561 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ArmnnDriverImpl.hpp"
+#include "ArmnnPreparedModel.hpp"
+#include "CacheDataHandler.hpp"
+#include "ModelToINetworkTransformer.hpp"
+#include "SystemPropertiesUtils.hpp"
+
+#include <armnnDeserializer/IDeserializer.hpp>
+
+#include <log/log.h>
+#include <sys/stat.h>
+
+namespace
+{
+
+Capabilities GenerateCapabilities()
+{
+    VLOG(DRIVER) << "ArmnnDriverImpl::GenerateCapabilities()";
+
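+    // Use one nominal performance figure for all operand types (lower values advertise better performance to the NNAPI runtime).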
+    float defaultPerfValue = .1f;
+    const Capabilities::PerformanceInfo defaultPerfInfo = { /* execTime */ defaultPerfValue,
+                                                            /* powerUsage */ defaultPerfValue
+                                                          };
+    std::vector<OperandType> operandsTypes({
+                OperandType::FLOAT32,
+                OperandType::INT32,
+                OperandType::UINT32,
+                OperandType::TENSOR_FLOAT32,
+                OperandType::TENSOR_INT32,
+                OperandType::TENSOR_QUANT8_ASYMM,
+                OperandType::BOOL,
+                OperandType::TENSOR_QUANT16_SYMM,
+                OperandType::TENSOR_FLOAT16,
+                OperandType::TENSOR_BOOL8,
+                OperandType::FLOAT16,
+                OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL,
+                OperandType::TENSOR_QUANT16_ASYMM,
+                OperandType::TENSOR_QUANT8_SYMM,
+                OperandType::TENSOR_QUANT8_ASYMM_SIGNED,
+    });
+
+    std::vector<Capabilities::OperandPerformance> operandPerformances;
+    operandPerformances.reserve(operandsTypes.size());
+
+    for (auto opType : operandsTypes)
+    {
+        operandPerformances.push_back(
+                Capabilities::OperandPerformance{ /* type */ opType, /* info */ defaultPerfInfo });
+    }
+
+    auto operandPerformanceTable =
+               Capabilities::OperandPerformanceTable::create(std::move(operandPerformances)).value();
+
+    return { /* relaxedFloat32toFloat16PerformanceScalar */ defaultPerfInfo,
+             /* relaxedFloat32toFloat16PerformanceTensor */ defaultPerfInfo,
+             /* operandPerformance */ std::move(operandPerformanceTable),
+             /* ifPerformance */ defaultPerfInfo,
+             /* whilePerformance */ defaultPerfInfo };
+}
+
+} // anonymous namespace
+
+using namespace android::nn;
+
+namespace armnn_driver
+{
+
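+// Returns true only if the shared handle wraps a valid file descriptor that was opened read-write.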
+bool ArmnnDriverImpl::ValidateSharedHandle(const SharedHandle& sharedHandle)
+{
+    bool valid = true;
+
+    if (*sharedHandle < 0)
+    {
+        return !valid;
+    }
+
+    int dataCacheFileAccessMode = fcntl(*sharedHandle, F_GETFL) & O_ACCMODE;
+    if (dataCacheFileAccessMode != O_RDWR)
+    {
+        return !valid;
+    }
+
+    return valid;
+}
+
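+// The data cache is valid only when it consists of a single read-write file whose size matches the expected serialized network size.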
+bool ArmnnDriverImpl::ValidateDataCacheHandle(const std::vector<SharedHandle>& dataCacheHandle, const size_t dataSize)
+{
+    bool valid = true;
+    // DataCacheHandle size should always be 1 for ArmNN model
+    if (dataCacheHandle.size() != 1)
+    {
+        return !valid;
+    }
+
+    if (dataSize == 0)
+    {
+        return !valid;
+    }
+
+    struct stat statBuffer;
+    if (fstat(*dataCacheHandle[0], &statBuffer) == 0)
+    {
+        unsigned long bufferSize = statBuffer.st_size;
+        if (bufferSize != dataSize)
+        {
+            return !valid;
+        }
+    }
+
+    return ValidateSharedHandle(dataCacheHandle[0]);
+}
+
+std::vector<armnn::NetworkId>& ArmnnDriverImpl::GetLoadedNetworks()
+{
+    return m_NetworkIDs;
+}
+
+GeneralResult<SharedPreparedModel> ArmnnDriverImpl::PrepareArmnnModel(
+    const armnn::IRuntimePtr& runtime,
+    const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+    const DriverOptions& options,
+    const Model& model,
+    const std::vector<SharedHandle>& modelCacheHandle,
+    const std::vector<SharedHandle>& dataCacheHandle,
+    const CacheToken& token,
+    bool float32ToFloat16,
+    Priority priority)
+{
+    VLOG(DRIVER) << "ArmnnDriverImpl::PrepareArmnnModel()";
+
+    if (!runtime)
+    {
+        return NN_ERROR(ErrorStatus::DEVICE_UNAVAILABLE) << "Device unavailable";
+    }
+
+    if (const auto result = validate(model); !result.ok())
+    {
+        return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Invalid model passed as input";
+    }
+
+    // Deliberately ignore any unsupported operations requested by the options -
+    // at this point we're being asked to prepare a model that we've already declared support for
+    // and the operation indices may be different to those in getSupportedOperations anyway.
+    std::set<unsigned int> unsupportedOperations;
+    ModelToINetworkTransformer modelConverter(options.GetBackends(),
+                                              model,
+                                              unsupportedOperations);
+
+    if (modelConverter.GetConversionResult() != ConversionResult::Success)
+    {
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "ModelToINetworkConverter failed";
+    }
+
+    // Serialize the network graph to a .armnn file if an output directory
+    // has been specified in the drivers' arguments.
+    std::vector<uint8_t> dataCacheData;
+    bool serializeToFile = !dataCacheHandle.empty();
+    auto serializedNetworkFileName =
+            SerializeNetwork(*modelConverter.GetINetwork(),
+                             options.GetRequestInputsAndOutputsDumpDir(),
+                             dataCacheData,
+                             serializeToFile);
+
+    // Optimize the network
+    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
+    armnn::OptimizerOptions OptOptions;
+    OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
+    OptOptions.m_ProfilingEnabled = options.IsGpuProfilingEnabled();
+
+    int cachedFd = -1;
+    bool saveCachedNetwork = options.SaveCachedNetwork();
+
+    unsigned int numberOfCachedModelFiles = 0;
+    if (modelCacheHandle.size() > 0)
+    {
+        unsigned int index = 0;
+        for (auto& backend : options.GetBackends())
+        {
+            // modelCacheHandle size should be equal to numberOfCachedModelFiles
+            // modelCacheHandle vector should be in same order as backends
+            auto numberOfCacheFiles = GetNumberOfCacheFiles(backend);
+            if (numberOfCacheFiles > 0)
+            {
+                numberOfCachedModelFiles += numberOfCacheFiles;
+                // For GpuAcc numberOfCachedFiles is 1
+                if (backend == armnn::Compute::GpuAcc)
+                {
+                    cachedFd = *modelCacheHandle[index];
+                    saveCachedNetwork = true;
+                }
+                index += numberOfCacheFiles;
+            }
+        }
+    }
+
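+    // Pass the cache file descriptor through to the GpuAcc backend so the compiled network can be written to (or later read from) the model cache.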
+    armnn::BackendOptions gpuAcc("GpuAcc",
+    {
+        { "FastMathEnabled", options.IsFastMathEnabled() },
+        { "SaveCachedNetwork", saveCachedNetwork },
+        { "CachedNetworkFilePath", options.GetCachedNetworkFilePath() },
+        { "MLGOTuningFilePath", options.GetClMLGOTunedParametersFile() },
+        { "CachedFileDescriptor", cachedFd }
+    });
+
+    armnn::BackendOptions cpuAcc("CpuAcc",
+    {
+        { "FastMathEnabled", options.IsFastMathEnabled() },
+        { "NumberOfThreads", options.GetNumberOfThreads() }
+    });
+    OptOptions.m_ModelOptions.push_back(gpuAcc);
+    OptOptions.m_ModelOptions.push_back(cpuAcc);
+
+    std::vector<std::string> errMessages;
+    try
+    {
+        optNet = armnn::Optimize(*modelConverter.GetINetwork(),
+                                 options.GetBackends(),
+                                 runtime->GetDeviceSpec(),
+                                 OptOptions,
+                                 errMessages);
+    }
+    catch (std::exception& e)
+    {
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << e.what();
+    }
+
+    // Check that the optimized network is valid.
+    if (!optNet)
+    {
+        std::stringstream message;
+        message << "Invalid optimized network";
+        for (const std::string& msg : errMessages)
+        {
+            message << "\n" << msg;
+        }
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
+    }
+
+    // Export the optimized network graph to a dot file if an output dump directory
+    // has been specified in the drivers' arguments.
+    std::string dotGraphFileName = ExportNetworkGraphToDotFile(*optNet,
+                                                               options.GetRequestInputsAndOutputsDumpDir());
+
+    // Load it into the runtime.
+    armnn::NetworkId netId = 0;
+    std::string msg;
+    armnn::INetworkProperties networkProperties(options.isAsyncModelExecutionEnabled(),
+                                                MemorySource::Undefined,
+                                                MemorySource::Undefined,
+                                                options.IsGpuProfilingEnabled());
+    auto numInputs  = getMainModel(model).inputIndexes.size();
+    auto numOutputs = getMainModel(model).outputIndexes.size();
+    try
+    {
+        if (runtime->LoadNetwork(netId, std::move(optNet), msg, networkProperties) != armnn::Status::Success)
+        {
+            return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be loaded";
+        }
+    }
+    catch (std::exception& e)
+    {
+        std::stringstream message;
+        message << "Exception (" << e.what()<< ") caught from LoadNetwork.";
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
+    }
+
+    // Now that we have a networkId for the graph rename the exported files to use it
+    // so that we can associate the graph file and the input/output tensor exported files
+    RenameExportedFiles(serializedNetworkFileName,
+                        dotGraphFileName,
+                        options.GetRequestInputsAndOutputsDumpDir(),
+                        netId);
+
+    // Cache the model
+    size_t hashValue = 0;
+    if (dataCacheHandle.size() == 1)
+    {
+        write(*dataCacheHandle[0], dataCacheData.data(), dataCacheData.size());
+        hashValue = CacheDataHandlerInstance().Hash(dataCacheData);
+    }
+
+    // Cache the model data
+    if (modelCacheHandle.size() > 0)
+    {
+        if (modelCacheHandle.size() == numberOfCachedModelFiles)
+        {
+            for (uint32_t i = 0; i < modelCacheHandle.size(); ++i)
+            {
+                int modelCacheFileAccessMode = fcntl(*modelCacheHandle[i], F_GETFL) & O_ACCMODE;
+                if (modelCacheFileAccessMode != O_RDONLY)
+                {
+                    struct stat statBuffer;
+                    if (fstat(*modelCacheHandle[i], &statBuffer) == 0)
+                    {
+                        long modelDataSize = statBuffer.st_size;
+                        if (modelDataSize > 0)
+                        {
+                            std::vector<uint8_t> modelData(modelDataSize);
+                            pread(*modelCacheHandle[i], modelData.data(), modelData.size(), 0);
+                            hashValue ^= CacheDataHandlerInstance().Hash(modelData);
+                        }
+                    }
+                }
+            }
+        }
+    }
+    if (hashValue != 0)
+    {
+        CacheDataHandlerInstance().Register(token, hashValue, dataCacheData.size());
+    }
+
+    bool executeWithDummyInputs = (std::find(options.GetBackends().begin(),
+                                            options.GetBackends().end(),
+                                            armnn::Compute::GpuAcc) != options.GetBackends().end());
+
+    m_NetworkIDs.push_back(netId);
+    auto preparedModel = std::make_shared<const ArmnnPreparedModel>(netId,
+                                                                    runtime.get(),
+                                                                    model,
+                                                                    options.GetRequestInputsAndOutputsDumpDir(),
+                                                                    options.IsGpuProfilingEnabled(),
+                                                                    priority);
+
+    // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled (and tuned if
+    // this is enabled) before the first 'real' inference which removes the overhead of the first inference.
+    // Only run this if the GpuAcc backend has been added to options
+    if (executeWithDummyInputs)
+    {
+        if (!preparedModel->ExecuteWithDummyInputs(numInputs, numOutputs))
+        {
+            return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be executed";
+        }
+
+        if (clTunedParameters &&
+            options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
+        {
+            // Now that we've done one inference the CL kernel parameters will have been tuned,
+            // so save the updated file.
+            try
+            {
+                clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
+            }
+            catch (std::exception& error)
+            {
+                VLOG(DRIVER) << "ArmnnDriverImpl::prepareModel: Failed to save CL tuned parameters file"
+                             << options.GetClTunedParametersFile().c_str() << error.what();
+            }
+        }
+    }
+    return std::move(preparedModel);
+}
+
+std::vector<armnn::NetworkId> ArmnnDriverImpl::m_NetworkIDs = {};
+
+GeneralResult<SharedPreparedModel> ArmnnDriverImpl::PrepareArmnnModelFromCache(
+    const armnn::IRuntimePtr& runtime,
+    const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+    const DriverOptions& options,
+    const std::vector<SharedHandle>& modelCacheHandle,
+    const std::vector<SharedHandle>& dataCacheHandle,
+    const CacheToken& token,
+    bool float32ToFloat16)
+{
+    VLOG(DRIVER) << "ArmnnDriverImpl::PrepareArmnnModelFromCache()";
+
+    if (!runtime)
+    {
+        return NN_ERROR(ErrorStatus::DEVICE_UNAVAILABLE)
+                            << "ArmnnDriverImpl::prepareModelFromCache(): Device unavailable";
+    }
+
+    if (token.size() != ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN)
+    {
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+                            << "ArmnnDriverImpl::prepareModelFromCache(): Token size does not match!";
+    }
+
+    // Validate dataCacheHandle
+    auto dataSize = CacheDataHandlerInstance().GetCacheSize(token);
+    if (!ValidateDataCacheHandle(dataCacheHandle, dataSize))
+    {
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+                            << "ArmnnDriverImpl::prepareModelFromCache(): Not valid data cache handle!";
+    }
+
+    // Check that the number of cached model files matches the expected value
+    unsigned int numberOfCachedModelFiles = 0;
+    for (auto& backend : options.GetBackends())
+    {
+        numberOfCachedModelFiles += GetNumberOfCacheFiles(backend);
+    }
+    if (modelCacheHandle.size() != numberOfCachedModelFiles)
+    {
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+                           << "ArmnnDriverImpl::prepareModelFromCache(): Model cache handle size does not match.";
+    }
+
+    // Read the model
+    std::vector<uint8_t> dataCacheData(dataSize);
+    pread(*dataCacheHandle[0], dataCacheData.data(), dataCacheData.size(), 0);
+    auto hashValue = CacheDataHandlerInstance().Hash(dataCacheData);
+
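+    // Re-compute the combined hash (data cache XOR model cache contents) so it can be validated against the value registered at prepare time.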
+    int gpuAccCachedFd = -1;
+    if (modelCacheHandle.size() > 0)
+    {
+        unsigned int index = 0;
+        for (auto& backend : options.GetBackends())
+        {
+            // modelCacheHandle size should be equal to numberOfCachedModelFiles
+            // modelCacheHandle vector should be in same order as backends
+            auto numberOfCacheFiles = GetNumberOfCacheFiles(backend);
+            if (numberOfCacheFiles > 0)
+            {
+                if (!ValidateSharedHandle(modelCacheHandle[index]))
+                {
+                    return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+                            << "ArmnnDriverImpl::prepareModelFromCache(): Invalid model cache handle!";
+                }
+                int cachedFd = *modelCacheHandle[index];
+                struct stat statBuffer;
+                if (fstat(cachedFd, &statBuffer) == 0)
+                {
+                    long modelDataSize = statBuffer.st_size;
+                    if (modelDataSize > 0)
+                    {
+                        std::vector<uint8_t> modelData(modelDataSize);
+                        pread(cachedFd, modelData.data(), modelData.size(), 0);
+                        hashValue ^= CacheDataHandlerInstance().Hash(modelData);
+
+                        if (backend == armnn::Compute::GpuAcc)
+                        {
+                            gpuAccCachedFd = cachedFd;
+                        }
+                    }
+                }
+                index += numberOfCacheFiles;
+            }
+        }
+    }
+
+    if (!CacheDataHandlerInstance().Validate(token, hashValue, dataCacheData.size()))
+    {
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+                << "ArmnnDriverImpl::prepareModelFromCache(): ValidateHash() failed!";
+    }
+
+    // Deserialize the network.
+    armnn::INetworkPtr network = armnn::INetworkPtr(nullptr, [](armnn::INetwork*){});
+    try
+    {
+        network = armnnDeserializer::IDeserializer::Create()->CreateNetworkFromBinary(dataCacheData);
+    }
+    catch (std::exception&)
+    {
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+                << "ArmnnDriverImpl::prepareModelFromCache(): Exception caught from Deserializer!";
+    }
+
+    // Optimize the network
+    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
+    armnn::OptimizerOptions OptOptions;
+    OptOptions.m_ReduceFp32ToFp16 = float32ToFloat16;
+    OptOptions.m_ProfilingEnabled = options.IsGpuProfilingEnabled();
+
+    armnn::BackendOptions gpuAcc("GpuAcc",
+    {
+        { "FastMathEnabled", options.IsFastMathEnabled() },
+        { "SaveCachedNetwork", false },
+        { "CachedNetworkFilePath", options.GetCachedNetworkFilePath() },
+        { "MLGOTuningFilePath", options.GetClMLGOTunedParametersFile() },
+        { "CachedFileDescriptor", gpuAccCachedFd }
+    });
+
+    armnn::BackendOptions cpuAcc("CpuAcc",
+    {
+        { "FastMathEnabled", options.IsFastMathEnabled() },
+        { "NumberOfThreads", options.GetNumberOfThreads() }
+    });
+    OptOptions.m_ModelOptions.push_back(gpuAcc);
+    OptOptions.m_ModelOptions.push_back(cpuAcc);
+
+    std::vector<std::string> errMessages;
+    try
+    {
+        optNet = armnn::Optimize(*network.get(),
+                                 options.GetBackends(),
+                                 runtime->GetDeviceSpec(),
+                                 OptOptions,
+                                 errMessages);
+    }
+    catch (std::exception& e)
+    {
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << e.what();
+    }
+
+    // Check that the optimized network is valid.
+    if (!optNet)
+    {
+        std::stringstream message;
+        message << "Invalid optimized network";
+        for (const std::string& msg : errMessages)
+        {
+            message << "\n" << msg;
+        }
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
+    }
+
+    // Export the optimized network graph to a dot file if an output dump directory
+    // has been specified in the drivers' arguments.
+    std::string dotGraphFileName = ExportNetworkGraphToDotFile(*optNet,
+                                                               options.GetRequestInputsAndOutputsDumpDir());
+
+    // Load it into the runtime.
+    armnn::NetworkId netId = 0;
+    std::string msg;
+    armnn::INetworkProperties networkProperties(options.isAsyncModelExecutionEnabled(),
+                                                MemorySource::Undefined,
+                                                MemorySource::Undefined,
+                                                options.IsGpuProfilingEnabled());
+    try
+    {
+        if (runtime->LoadNetwork(netId, std::move(optNet), msg, networkProperties) != armnn::Status::Success)
+        {
+            return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "Network could not be loaded";
+        }
+    }
+    catch (std::exception& e)
+    {
+        std::stringstream message;
+        message << "Exception (" << e.what()<< ") caught from LoadNetwork.";
+        return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << message.str();
+    }
+
+    m_NetworkIDs.push_back(netId);
+    return std::make_shared<const ArmnnPreparedModel>(netId,
+                                                      runtime.get(),
+                                                      options.GetRequestInputsAndOutputsDumpDir(),
+                                                      options.IsGpuProfilingEnabled(),
+                                                      Priority::MEDIUM,
+                                                      true);
+}
+
+const Capabilities& ArmnnDriverImpl::GetCapabilities(const armnn::IRuntimePtr& runtime)
+{
+    VLOG(DRIVER) << "ArmnnDriverImpl::GetCapabilities()";
+    static const Capabilities theCapabilities = GenerateCapabilities();
+    return theCapabilities;
+}
+
+void ArmnnDriverImpl::ClearNetworks()
+{
+    m_NetworkIDs.clear();
+}
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/ArmnnDriverImpl.hpp b/shim/sl/canonical/ArmnnDriverImpl.hpp
new file mode 100644
index 0000000..836bf46
--- /dev/null
+++ b/shim/sl/canonical/ArmnnDriverImpl.hpp
@@ -0,0 +1,59 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "DriverOptions.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+
+using namespace android::nn;
+
+namespace armnn_driver
+{
+
+class ArmnnDriverImpl
+{
+public:
+    static GeneralResult<SharedPreparedModel> PrepareArmnnModel(
+        const armnn::IRuntimePtr& runtime,
+        const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+        const DriverOptions& options,
+        const Model& model,
+        const std::vector<SharedHandle>& modelCacheHandle,
+        const std::vector<SharedHandle>& dataCacheHandle,
+        const CacheToken& token,
+        bool float32ToFloat16 = false,
+        Priority priority = Priority::MEDIUM);
+
+    static GeneralResult<SharedPreparedModel> PrepareArmnnModelFromCache(
+        const armnn::IRuntimePtr& runtime,
+        const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
+        const DriverOptions& options,
+        const std::vector<SharedHandle>& modelCacheHandle,
+        const std::vector<SharedHandle>& dataCacheHandle,
+        const CacheToken& token,
+        bool float32ToFloat16 = false);
+
+    static const Capabilities& GetCapabilities(const armnn::IRuntimePtr& runtime);
+
+    static std::vector<armnn::NetworkId>& GetLoadedNetworks();
+
+    static void ClearNetworks();
+
+private:
+    static bool ValidateSharedHandle(const SharedHandle& sharedHandle);
+    static bool ValidateDataCacheHandle(const std::vector<SharedHandle>& dataCacheHandle, const size_t dataSize);
+
+    static std::vector<armnn::NetworkId> m_NetworkIDs;
+};
+
+} // namespace armnn_driver
\ No newline at end of file
diff --git a/shim/sl/canonical/ArmnnPreparedModel.cpp b/shim/sl/canonical/ArmnnPreparedModel.cpp
new file mode 100644
index 0000000..22e0900
--- /dev/null
+++ b/shim/sl/canonical/ArmnnPreparedModel.cpp
@@ -0,0 +1,697 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#define LOG_TAG "arm-armnn-sl"
+
+#include "ArmnnPreparedModel.hpp"
+#include "CanonicalUtils.hpp"
+
+#include <DefaultExecution.h>
+#include <LegacyUtils.h>
+#include <nnapi/IBurst.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/SharedMemory.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+
+#include <memory>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+using namespace android;
+using namespace android::nn;
+
+static const Timing g_NoTiming = {};
+
+namespace {
+
+using namespace armnn_driver;
+
+unsigned long MicrosecondsDuration(android::nn::TimePoint endPoint, android::nn::TimePoint startPoint)
+{
+    return static_cast<unsigned long>(std::chrono::duration_cast<std::chrono::microseconds>(
+                                      endPoint - startPoint).count());
+}
+
+bool ValidateRequestArgument(const Request::Argument& requestArg, const armnn::TensorInfo& tensorInfo)
+{
+    if (requestArg.dimensions.size() != 0)
+    {
+        if (requestArg.dimensions.size() != tensorInfo.GetNumDimensions())
+        {
+            VLOG(DRIVER) << "Mismatched dimensions (request argument: "
+                         << requestArg.dimensions.size() << " expected: " << tensorInfo.GetNumDimensions() << ")";
+            return false;
+        }
+
+        for (unsigned int d = 0; d < tensorInfo.GetNumDimensions(); ++d)
+        {
+            if (requestArg.dimensions[d] != 0 && requestArg.dimensions[d] != tensorInfo.GetShape()[d])
+            {
+                VLOG(DRIVER) << "Mismatched dimensions " << d
+                             << " (request argument: " << requestArg.dimensions[d]
+                             << " expected: " << tensorInfo.GetShape()[d];
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
+
+armnn::Tensor GetTensorForRequestArgument(const Request::Argument& requestArg,
+                                          const armnn::TensorInfo& tensorInfo,
+                                          const std::vector<::android::nn::RunTimePoolInfo>& requestPools)
+{
+    if (!ValidateRequestArgument(requestArg, tensorInfo))
+    {
+        return armnn::Tensor();
+    }
+
+    if (requestArg.lifetime == Request::Argument::LifeTime::POINTER)
+    {
+        return armnn::Tensor(tensorInfo, GetMemoryFromPointer(requestArg));
+    }
+    else if (requestArg.lifetime == Request::Argument::LifeTime::POOL)
+    {
+        return armnn::Tensor(tensorInfo, GetMemoryFromPool(requestArg.location, requestPools));
+    }
+    return armnn::Tensor();
+}
+
+inline std::string BuildTensorName(const char* tensorNamePrefix, std::size_t index)
+{
+    return tensorNamePrefix + std::to_string(index);
+}
+
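+// Returns true if any input or output argument of the request passes its data by raw pointer rather than through a memory pool.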
+bool IsPointerTypeMemory(const Request& request)
+{
+    for (auto& input : request.inputs)
+    {
+        if (input.lifetime == Request::Argument::LifeTime::POINTER)
+        {
+            return true;
+        }
+    }
+
+    for (auto& output: request.outputs)
+    {
+        if (output.lifetime == Request::Argument::LifeTime::POINTER)
+        {
+           return true;
+        }
+    }
+
+    return false;
+}
+
+} // anonymous namespace
+
+using namespace android::nn;
+
+namespace armnn_driver
+{
+
+void ArmnnPreparedModel::Init()
+{
+    // Enable profiling if required.
+    m_Runtime->GetProfiler(m_NetworkId)->EnableProfiling(m_GpuProfilingEnabled);
+}
+
+ArmnnPreparedModel::ArmnnPreparedModel(armnn::NetworkId networkId,
+                                       armnn::IRuntime* runtime,
+                                       const Model& model,
+                                       const std::string& requestInputsAndOutputsDumpDir,
+                                       const bool gpuProfilingEnabled,
+                                       Priority priority)
+    : m_NetworkId(networkId)
+    , m_Runtime(runtime)
+    , m_Model(model)
+    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
+    , m_GpuProfilingEnabled(gpuProfilingEnabled)
+    , m_ModelPriority(priority)
+    , m_PrepareFromCache(false)
+{
+    Init();
+}
+
+ArmnnPreparedModel::ArmnnPreparedModel(armnn::NetworkId networkId,
+                                       armnn::IRuntime* runtime,
+                                       const std::string& requestInputsAndOutputsDumpDir,
+                                       const bool gpuProfilingEnabled,
+                                       Priority priority,
+                                       const bool prepareModelFromCache)
+    : m_NetworkId(networkId)
+    , m_Runtime(runtime)
+    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
+    , m_GpuProfilingEnabled(gpuProfilingEnabled)
+    , m_ModelPriority(priority)
+    , m_PrepareFromCache(prepareModelFromCache)
+{
+    Init();
+}
+
+
+ErrorStatus ArmnnPreparedModel::PrepareMemoryForInputs(
+    armnn::InputTensors& inputs,
+    const Request& request,
+    const std::vector<android::nn::RunTimePoolInfo>& memPools) const
+{
+    inputs.reserve(request.inputs.size());
+    for (unsigned int i = 0; i < request.inputs.size(); i++)
+    {
+        const auto& inputArg = request.inputs[i];
+
+        armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
+        // inputs (of type InputTensors) is composed of a vector of ConstTensors.
+        // Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
+        inputTensorInfo.SetConstant();
+        const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);
+
+        if (inputTensor.GetMemoryArea() == nullptr)
+        {
+            VLOG(DRIVER) << "Cannot execute request. Error converting request input " << i << "to tensor.";
+            return ErrorStatus::GENERAL_FAILURE;
+        }
+        inputs.emplace_back(i, inputTensor);
+    }
+
+    return ErrorStatus::NONE;
+}
+
+ErrorStatus ArmnnPreparedModel::PrepareMemoryForOutputs(
+    armnn::OutputTensors& outputs,
+    std::vector<OutputShape> &outputShapes,
+    const Request& request,
+    const std::vector<android::nn::RunTimePoolInfo>& memPools) const
+{
+    outputs.reserve(request.outputs.size());
+    for (unsigned int i = 0; i < request.outputs.size(); i++)
+    {
+        auto& outputArg = request.outputs[i];
+
+        armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
+        armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
+        if (outputTensor.GetMemoryArea() == nullptr)
+        {
+            VLOG(DRIVER) << "Cannot execute request. Error converting request output " << i << "to tensor.";
+            return ErrorStatus::GENERAL_FAILURE;
+        }
+
+        const size_t outputSize = outputTensorInfo.GetNumBytes();
+
+        unsigned int count = 0;
+        std::for_each(outputArg.dimensions.begin(), outputArg.dimensions.end(), [&](auto dim)
+        {
+            if (dim != 0)
+            {
+                outputTensorInfo.GetShape()[count] = dim;
+            }
+            else
+            {
+                outputTensorInfo.GetShape()[count] = outputArg.dimensions.size();
+            }
+
+            count++;
+        });
+
+        outputs.emplace_back(i, outputTensor);
+        outputShapes[i] = ComputeShape(outputTensorInfo);
+
+        if (outputArg.location.length < outputSize)
+        {
+            VLOG(DRIVER) << "ArmnnPreparedModel::Execute failed outputArg.location.length "
+                  << std::to_string(outputArg.location.length).c_str()
+                  << " < outputSize " << std::to_string(outputSize).c_str();
+            outputShapes[i].isSufficient = false;
+            return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+        }
+
+        //TODO: Need to check for Request::Argument::LifeTime::POINTER
+        if (outputArg.lifetime == Request::Argument::LifeTime::POOL)
+        {
+            size_t bufferSize = memPools.at(outputArg.location.poolIndex).getSize();
+            if (bufferSize < outputSize)
+            {
+                VLOG(DRIVER) << "ArmnnPreparedModel::Execute failed bufferSize "
+                             << std::to_string(bufferSize).c_str()
+                             << " < outputSize " << std::to_string(outputSize).c_str();
+                outputShapes[i].isSufficient = false;
+                return ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
+            }
+        }
+    }
+    return ErrorStatus::NONE;
+}
+
+ErrorStatus ArmnnPreparedModel::PrepareMemoryForIO(armnn::InputTensors& inputs,
+                                                   armnn::OutputTensors& outputs,
+                                                   std::vector<android::nn::RunTimePoolInfo>& memPools,
+                                                   const Request& request) const
+{
+    // Check that memory pools are not empty and add the inputs and outputs with their data
+    try
+    {
+        if (!setRunTimePoolInfosFromMemoryPools(&memPools, request.pools))
+        {
+            return ErrorStatus::INVALID_ARGUMENT;
+        }
+
+        if (PrepareMemoryForInputs(inputs, request, memPools) != ErrorStatus::NONE)
+        {
+            VLOG(DRIVER) << "Failed when preparing memory for Inputs";
+            return ErrorStatus::GENERAL_FAILURE;
+        }
+
+        std::vector<OutputShape> outputShapes(request.outputs.size());
+
+        auto errorStatus = PrepareMemoryForOutputs(outputs, outputShapes, request, memPools);
+        if (errorStatus != ErrorStatus::NONE)
+        {
+            return errorStatus;
+        }
+    }
+    catch (armnn::Exception& e)
+    {
+        VLOG(DRIVER) << "armnn::Exception caught while preparing for EnqueueWorkload: " << e.what();
+        return ErrorStatus::GENERAL_FAILURE;
+    }
+    catch (std::exception& e)
+    {
+        VLOG(DRIVER) << "std::exception caught while preparing for EnqueueWorkload: " << e.what();
+        return ErrorStatus::GENERAL_FAILURE;
+    }
+
+    return ErrorStatus::NONE;
+}
+
+ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> ArmnnPreparedModel::execute(
+    const Request& request,
+    MeasureTiming measureTiming,
+    const OptionalTimePoint& deadline,
+    const OptionalDuration&,
+    const std::vector<android::nn::TokenValuePair>& hints,
+    const std::vector<android::nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const
+{
+    VLOG(DRIVER) << "CanonicalDriver::PreparedModel::execute()";
+
+    CanonicalExecutionContext ctx;
+    if (measureTiming == MeasureTiming::YES)
+    {
+        ctx.measureTimings = measureTiming;
+        ctx.driverStart =  Clock::now();
+    }
+
+    if (!m_PrepareFromCache)
+    {
+        const auto modelRequest = validateRequestForModel(request, m_Model);
+        if (!modelRequest.ok())
+        {
+            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << modelRequest.error();
+        }
+        VLOG(DRIVER) << "ArmnnPreparedModel::execute(): " << GetModelSummary(m_Model).c_str();
+    }
+    if (hasDeadlinePassed(deadline)) {
+        return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
+    }
+
+    // map the memory pool into shared pointers
+    // use a shared memory pools vector on the heap, as it is passed to the request thread
+    auto memPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();
+
+    // allocate the tensors on the heap, as they are passed to the request thread
+    auto inputTensors = std::make_shared<armnn::InputTensors>();
+    auto outputTensors = std::make_shared<armnn::OutputTensors>();
+
+    ErrorStatus theErrorStatus = ErrorStatus::NONE;
+
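+    // Requests with pointer-backed arguments are converted to shared memory before tensors are prepared, then results are flushed back afterwards.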
+    auto isPointerTypeMemory = IsPointerTypeMemory(request);
+    nn::RequestRelocation relocation;
+    if (isPointerTypeMemory)
+    {
+        std::optional<nn::Request> maybeRequestInShared;
+        auto executionResult =
+                nn::convertRequestFromPointerToShared(
+                        &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+                        &maybeRequestInShared, &relocation);
+        if(!executionResult.has_value())
+        {
+            VLOG(DRIVER) << "ArmnnPreparedModel::PrepareMemoryForIO::Failed to convertRequestFromPointerToShared.";
+            return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+                          << "ArmnnPreparedModel convertRequestFromPointerToShared failed";
+        }
+        const nn::Request& requestInShared = std::move(executionResult).value();
+        if (relocation.input)
+        {
+            relocation.input->flush();
+        }
+
+        theErrorStatus = PrepareMemoryForIO(*inputTensors, *outputTensors, *memPools, requestInShared);
+    }
+    else
+    {
+        theErrorStatus = PrepareMemoryForIO(*inputTensors, *outputTensors, *memPools, request);
+    }
+
+    switch(theErrorStatus)
+    {
+        case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
+            return NN_ERROR(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE);
+        case ErrorStatus::GENERAL_FAILURE:
+            return NN_ERROR(ErrorStatus::GENERAL_FAILURE);
+        case ErrorStatus::INVALID_ARGUMENT:
+            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT);
+        default:
+        {}
+    }
+
+    std::vector<OutputShape> outputShapes(outputTensors->size());
+    for (unsigned int i = 0; i < outputTensors->size(); i++)
+    {
+        std::pair<int, armnn::Tensor> outputTensorPair = (*outputTensors)[i];
+        const armnn::Tensor outputTensor = outputTensorPair.second;
+        const armnn::TensorInfo outputTensorInfo = outputTensor.GetInfo();
+
+        outputShapes[i] = ComputeShape(outputTensorInfo);
+    }
+    Timing theTiming;
+
+    VLOG(DRIVER) << "ArmnnPreparedModel::execute(...) before ExecuteGraph";
+    auto errorStatus = ExecuteGraph(memPools, *inputTensors, *outputTensors, ctx);
+    if (errorStatus != ErrorStatus::NONE)
+    {
+        return NN_ERROR(errorStatus) << "execute() failed";
+    }
+    VLOG(DRIVER) << "ArmnnPreparedModel::execute(...) after ExecuteGraph";
+    if (isPointerTypeMemory && relocation.output)
+    {
+        relocation.output->flush();
+    }
+
+    return std::make_pair(outputShapes, theTiming);
+}
+
+ErrorStatus ArmnnPreparedModel::ExecuteGraph(
+    std::shared_ptr<std::vector<android::nn::RunTimePoolInfo>>& pMemPools,
+    armnn::InputTensors& inputTensors,
+    armnn::OutputTensors& outputTensors,
+    CanonicalExecutionContext ctx) const
+{
+    VLOG(DRIVER) << "ArmnnPreparedModel::ExecuteGraph(...)";
+
+    DumpTensorsIfRequired("Input", inputTensors);
+
+    try
+    {
+        if (ctx.measureTimings == MeasureTiming::YES)
+        {
+            ctx.deviceStart =  Clock::now();
+        }
+        armnn::Status status;
+        VLOG(DRIVER) << "ArmnnPreparedModel::ExecuteGraph m_AsyncModelExecutionEnabled false";
+
+        status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
+
+        if (ctx.measureTimings == MeasureTiming::YES)
+        {
+            ctx.deviceEnd =  Clock::now();
+        }
+        if (status != armnn::Status::Success)
+        {
+            VLOG(DRIVER) << "ArmnnPreparedModel:ExecuteGraph EnqueueWorkload failed";
+            return ErrorStatus::GENERAL_FAILURE;
+        }
+    }
+    catch (armnn::Exception& e)
+    {
+        VLOG(DRIVER) << "armnn:Exception caught from EnqueueWorkload: " << e.what();
+        return ErrorStatus::GENERAL_FAILURE;
+    }
+    catch (std::exception& e)
+    {
+        VLOG(DRIVER) << "std::exception caught from EnqueueWorkload: " << e.what();
+        return ErrorStatus::GENERAL_FAILURE;
+    }
+
+    CommitPools(*pMemPools);
+    DumpTensorsIfRequired("Output", outputTensors);
+
+    if (ctx.measureTimings == MeasureTiming::YES)
+    {
+        ctx.driverEnd =  Clock::now();
+        Timing timing;
+        timing.timeOnDevice = ctx.deviceEnd - ctx.deviceStart;
+        timing.timeInDriver = ctx.driverEnd - ctx.driverStart;
+        VLOG(DRIVER) << "ArmnnPreparedModel::execute timing - Device = "
+                     << timing.timeOnDevice << "Driver = " <<  timing.timeInDriver;
+    }
+    return ErrorStatus::NONE;
+}
+
+Priority ArmnnPreparedModel::GetModelPriority() const
+{
+    return m_ModelPriority;
+}
+
+
+GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>> ArmnnPreparedModel::executeFenced(
+    const Request& request,
+    const std::vector<SyncFence>& waitFor,
+    MeasureTiming measureTiming,
+    const OptionalTimePoint& deadline,
+    const OptionalDuration&,
+    const OptionalDuration&,
+    const std::vector<android::nn::TokenValuePair>& hints,
+    const std::vector<android::nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const
+{
+    VLOG(DRIVER) << "ArmnnPreparedModel::executeFenced()";
+
+    if (!m_PrepareFromCache) {
+        const auto modelRequest = validateRequestForModel(request, m_Model);
+        if (!modelRequest.ok())
+        {
+            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << modelRequest.error();
+        }
+        VLOG(DRIVER) << "ArmnnPreparedModel::executeFenced(): " << GetModelSummary(m_Model).c_str();
+    }
+    if (hasDeadlinePassed(deadline))
+    {
+        return NN_ERROR(ErrorStatus::MISSED_DEADLINE_PERSISTENT);
+    }
+
+    CanonicalExecutionContext ctx;
+    if (measureTiming == MeasureTiming::YES)
+    {
+        ctx.measureTimings = measureTiming;
+        ctx.driverStart =  Clock::now();
+    }
+
+    // Wait for the dependent events to signal
+    for (const auto& syncFence : waitFor)
+    {
+        if (!syncFence.getSharedHandle())
+        {
+            return NN_ERROR(ErrorStatus::INVALID_ARGUMENT);
+        }
+        if (syncFence.syncWait({}) != SyncFence::FenceState::SIGNALED)
+        {
+            return NN_ERROR(ErrorStatus::GENERAL_FAILURE) << "syncWait failed";
+        }
+    }
+
+    android::nn::TimePoint fenceExecutionStart;
+    if (measureTiming == MeasureTiming::YES)
+    {
+        fenceExecutionStart = Clock::now();
+    }
+
+    // map the memory pool into shared pointers
+    // use a shared memory pools vector on the heap, as it is passed to the request thread
+    auto memPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();
+
+    // allocate the tensors on the heap, as they are passed to the request thread
+    auto inputTensors = std::make_shared<armnn::InputTensors>();
+    auto outputTensors = std::make_shared<armnn::OutputTensors>();
+
+    ErrorStatus theErrorStatus = ErrorStatus::NONE;
+
+    auto isPointerTypeMemory = IsPointerTypeMemory(request);
+    nn::RequestRelocation relocation;
+    if (isPointerTypeMemory)
+    {
+        std::optional<nn::Request> maybeRequestInShared;
+        auto executionResult =
+                nn::convertRequestFromPointerToShared(
+                        &request, nn::kDefaultRequestMemoryAlignment, nn::kMinMemoryPadding,
+                        &maybeRequestInShared, &relocation);
+        if(!executionResult.has_value())
+        {
+            VLOG(DRIVER) << "ArmnnPreparedModel::PrepareMemoryForIO::Failed to convertRequestFromPointerToShared.";
+            return NN_ERROR(ErrorStatus::GENERAL_FAILURE)
+                                     << "ArmnnPreparedModel convertRequestFromPointerToShared failed";
+        }
+        const nn::Request& requestInShared = std::move(executionResult).value();
+        if (relocation.input)
+        {
+            relocation.input->flush();
+        }
+
+        theErrorStatus = PrepareMemoryForIO(*inputTensors, *outputTensors, *memPools, requestInShared);
+    }
+    else
+    {
+        theErrorStatus = PrepareMemoryForIO(*inputTensors, *outputTensors, *memPools, request);
+    }
+
+    if (theErrorStatus != ErrorStatus::NONE)
+    {
+        return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "executeFenced() failed";
+    }
+
+    Timing timingSinceLaunch = {};
+    Timing timingAfterFence  = {};
+    if (measureTiming == MeasureTiming::YES)
+    {
+        timingAfterFence.timeOnDevice = ctx.deviceEnd - ctx.deviceStart;
+        timingAfterFence.timeInDriver = ctx.driverEnd - fenceExecutionStart;
+        VLOG(DRIVER) << "executeFenced timingSinceLaunch = " << timingAfterFence.timeOnDevice;
+        VLOG(DRIVER) << "executeFenced timingAfterFence = " << timingAfterFence.timeInDriver;
+    }
+
+    VLOG(DRIVER) << "ArmnnCanonicalPreparedModel::executeFenced(...) before ExecuteGraph";
+    auto errorStatus = ExecuteGraph(memPools, *inputTensors, *outputTensors, ctx);
+    VLOG(DRIVER) << "ArmnnCanonicalPreparedModel::executeFenced(...) after ExecuteGraph";
+    if (isPointerTypeMemory && relocation.output)
+    {
+         relocation.output->flush();
+    }
+
+    ExecuteFencedInfoCallback armnnFencedExecutionCallback =
+            [timingSinceLaunch, timingAfterFence, errorStatus]() {
+
+                GeneralResult<std::pair<Timing, Timing>> result;
+
+                switch(errorStatus)
+                {
+                    case ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
+                        result.error().code = (ErrorStatus::OUTPUT_INSUFFICIENT_SIZE);
+                        break;
+                    case ErrorStatus::GENERAL_FAILURE:
+                        result.error().code = (ErrorStatus::GENERAL_FAILURE);
+                        break;
+                    case ErrorStatus::INVALID_ARGUMENT:
+                        result.error().code = (ErrorStatus::INVALID_ARGUMENT);
+                        break;
+                    default:
+                    {
+                        result.value() = std::make_pair(timingSinceLaunch, timingAfterFence);
+                        break;
+                    }
+                }
+                return result;
+            };
+    return std::make_pair(SyncFence::createAsSignaled(), std::move(armnnFencedExecutionCallback));
+}
+
+GeneralResult<SharedExecution> ArmnnPreparedModel::createReusableExecution(
+    const Request& request,
+    MeasureTiming measureTiming,
+    const OptionalDuration& loopTimeoutDuration,
+    const std::vector<android::nn::TokenValuePair>& hints,
+    const std::vector<android::nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const
+{
+    VLOG(DRIVER) << "ArmnnPreparedModel::createReusableExecution()";
+    return std::make_shared<DefaultExecution>(shared_from_this(),
+                                              request,
+                                              measureTiming,
+                                              loopTimeoutDuration);
+}
+
+GeneralResult<SharedBurst> ArmnnPreparedModel::configureExecutionBurst() const
+{
+    // TODO: Implement BURST
+    return nullptr;
+}
+
+std::any ArmnnPreparedModel::getUnderlyingResource() const
+{
+    return &m_Model;
+}
+
+template<typename TensorBindingCollection>
+void ArmnnPreparedModel::DumpTensorsIfRequired(char const* tensorNamePrefix,
+                                               const TensorBindingCollection& tensorBindings) const
+{
+    if (!m_RequestInputsAndOutputsDumpDir.empty())
+    {
+        const std::string requestName = std::to_string(m_NetworkId) + ".dump";
+        for (std::size_t i = 0u; i < tensorBindings.size(); ++i)
+        {
+            DumpTensor(m_RequestInputsAndOutputsDumpDir,
+                       requestName,
+                       BuildTensorName(tensorNamePrefix, i),
+                       tensorBindings[i].second);
+        }
+    }
+}
+
+ArmnnPreparedModel::~ArmnnPreparedModel()
+{
+    VLOG(DRIVER) << "ArmnnPreparedModel::~ArmnnPreparedModel()";
+    // Get a hold of the profiler used by this model.
+    if (m_GpuProfilingEnabled)
+    {
+        auto profiler = m_Runtime->GetProfiler(m_NetworkId);
+        if (profiler)
+        {
+            // Dump the profiling info to a file if required.
+            DumpJsonProfilingIfRequired(m_GpuProfilingEnabled,
+                                        m_RequestInputsAndOutputsDumpDir,
+                                        m_NetworkId,
+                                        profiler.get());
+        }
+    }
+    // Unload the network associated with this model
+    m_Runtime->UnloadNetwork(m_NetworkId);
+}
+
+bool ArmnnPreparedModel::ExecuteWithDummyInputs(unsigned int numInputs, unsigned int numOutputs) const
+{
+    std::vector<std::vector<char>> storage;
+    armnn::InputTensors inputTensors;
+    for (unsigned int i = 0; i < numInputs; i++)
+    {
+        armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
+        // pInputTensors (of type InputTensors) is composed of a vector of ConstTensors.
+        // Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
+        inputTensorInfo.SetConstant();
+        storage.emplace_back(inputTensorInfo.GetNumBytes());
+        const armnn::ConstTensor inputTensor(inputTensorInfo, storage.back().data());
+
+        inputTensors.emplace_back(i, inputTensor);
+    }
+
+    armnn::OutputTensors outputTensors;
+    for (unsigned int i = 0; i < numOutputs; i++)
+    {
+        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
+        storage.emplace_back(outputTensorInfo.GetNumBytes());
+        const armnn::Tensor outputTensor(outputTensorInfo, storage.back().data());
+
+        outputTensors.emplace_back(i, outputTensor);
+    }
+    CanonicalExecutionContext ctx;
+    ctx.measureTimings = MeasureTiming::NO;
+    auto memPools = std::make_shared<std::vector<::android::nn::RunTimePoolInfo>>();
+
+    auto errorStatus = ExecuteGraph(memPools,
+                                    inputTensors,
+                                    outputTensors,
+                                    ctx);
+
+    return errorStatus == ErrorStatus::NONE;
+}
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/ArmnnPreparedModel.hpp b/shim/sl/canonical/ArmnnPreparedModel.hpp
new file mode 100644
index 0000000..e97e7d7
--- /dev/null
+++ b/shim/sl/canonical/ArmnnPreparedModel.hpp
@@ -0,0 +1,132 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ArmnnDriver.hpp"
+#include "ArmnnDriverImpl.hpp"
+#include "ModelToINetworkTransformer.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <BufferTracker.h>
+#include <CpuExecutor.h>
+#include <nnapi/IExecution.h>
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <memory>
+#include <tuple>
+#include <utility>
+#include <vector>
+#include <string>
+
+namespace armnn_driver
+{
+    struct CanonicalExecutionContext
+    {
+        ::android::nn::MeasureTiming    measureTimings =
+                ::android::nn::MeasureTiming::NO;
+        android::nn::TimePoint driverStart;
+        android::nn::TimePoint driverEnd;
+        android::nn::TimePoint deviceStart;
+        android::nn::TimePoint deviceEnd;
+    };
+class ArmnnPreparedModel final : public IPreparedModel,
+                                 public std::enable_shared_from_this<ArmnnPreparedModel>
+{
+public:
+    ArmnnPreparedModel(armnn::NetworkId networkId,
+                       armnn::IRuntime* runtime,
+                       const Model& model,
+                       const std::string& requestInputsAndOutputsDumpDir,
+                       const bool gpuProfilingEnabled,
+                       Priority priority = Priority::MEDIUM);
+
+    ArmnnPreparedModel(armnn::NetworkId networkId,
+                       armnn::IRuntime* runtime,
+                       const std::string& requestInputsAndOutputsDumpDir,
+                       const bool gpuProfilingEnabled,
+                       Priority priority = Priority::MEDIUM,
+                       const bool prepareModelFromCache = false);
+
+    virtual ~ArmnnPreparedModel();
+
+    ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> execute(
+        const Request& request,
+        MeasureTiming measureTiming,
+        const OptionalTimePoint& deadline,
+        const OptionalDuration& loopTimeoutDuration,
+        const std::vector<android::nn::TokenValuePair>& hints,
+        const std::vector<android::nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
+
+    GeneralResult<std::pair<SyncFence, ExecuteFencedInfoCallback>> executeFenced(
+        const Request& request,
+        const std::vector<SyncFence>& waitFor,
+        MeasureTiming measureTiming,
+        const OptionalTimePoint& deadline,
+        const OptionalDuration& loopTimeoutDuration,
+        const OptionalDuration& timeoutDurationAfterFence,
+        const std::vector<android::nn::TokenValuePair>& hints,
+        const std::vector<android::nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
+
+    GeneralResult<android::nn::SharedExecution> createReusableExecution(
+        const Request& request,
+        MeasureTiming measureTiming,
+        const OptionalDuration& loopTimeoutDuration,
+        const std::vector<android::nn::TokenValuePair>& hints,
+        const std::vector<android::nn::ExtensionNameAndPrefix>& extensionNameToPrefix) const override;
+
+    GeneralResult<SharedBurst> configureExecutionBurst() const override;
+
+    std::any getUnderlyingResource() const override;
+
+    /// execute the graph prepared from the request
+    ErrorStatus ExecuteGraph(
+        std::shared_ptr<std::vector<android::nn::RunTimePoolInfo>>& pMemPools,
+        armnn::InputTensors& inputTensors,
+        armnn::OutputTensors& outputTensors,
+        CanonicalExecutionContext  callback) const;
+
+    Priority GetModelPriority() const;
+
+    /// Executes this model with dummy inputs (e.g. all zeroes).
+    /// \return false on failure, otherwise true
+    bool ExecuteWithDummyInputs(unsigned int numInputs, unsigned int numOutputs) const;
+
+private:
+    void Init();
+    ErrorStatus PrepareMemoryForInputs(
+        armnn::InputTensors& inputs,
+        const Request& request,
+        const std::vector<android::nn::RunTimePoolInfo>& memPools) const;
+
+    ErrorStatus PrepareMemoryForOutputs(
+        armnn::OutputTensors& outputs,
+        std::vector<OutputShape> &outputShapes,
+        const Request& request,
+        const std::vector<android::nn::RunTimePoolInfo>& memPools) const;
+
+    ErrorStatus PrepareMemoryForIO(armnn::InputTensors& inputs,
+                                   armnn::OutputTensors& outputs,
+                                   std::vector<android::nn::RunTimePoolInfo>& memPools,
+                                   const Request& request) const;
+
+    template <typename TensorBindingCollection>
+    void DumpTensorsIfRequired(char const* tensorNamePrefix, const TensorBindingCollection& tensorBindings) const;
+
+    /// ArmNN runtime and network state for this prepared model
+    armnn::NetworkId                        m_NetworkId;
+    armnn::IRuntime*                        m_Runtime;
+
+    const Model                             m_Model;
+    const std::string&                      m_RequestInputsAndOutputsDumpDir;
+    const bool                              m_GpuProfilingEnabled;
+    Priority                                m_ModelPriority;
+    const bool                              m_PrepareFromCache;
+};
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/CacheDataHandler.cpp b/shim/sl/canonical/CacheDataHandler.cpp
new file mode 100644
index 0000000..930a8e4
--- /dev/null
+++ b/shim/sl/canonical/CacheDataHandler.cpp
@@ -0,0 +1,69 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "CacheDataHandler.hpp"
+
+#include <log/log.h>
+
+namespace armnn_driver
+{
+
+CacheDataHandler& CacheDataHandlerInstance()
+{
+    static CacheDataHandler instance;
+    return instance;
+}
+
+void CacheDataHandler::Register(const android::nn::CacheToken token, const size_t hashValue, const size_t cacheSize)
+{
+    if (!m_CacheDataMap.empty()
+            && m_CacheDataMap.find(hashValue) != m_CacheDataMap.end()
+            && m_CacheDataMap.at(hashValue).GetToken() == token
+            && m_CacheDataMap.at(hashValue).GetCacheSize() == cacheSize)
+    {
+        return;
+    }
+    CacheHandle cacheHandle(token, cacheSize);
+    m_CacheDataMap.insert({hashValue, cacheHandle});
+}
+
+bool CacheDataHandler::Validate(const android::nn::CacheToken token,
+                                const size_t hashValue,
+                                const size_t cacheSize) const
+{
+    return (!m_CacheDataMap.empty()
+            && m_CacheDataMap.find(hashValue) != m_CacheDataMap.end()
+            && m_CacheDataMap.at(hashValue).GetToken() == token
+            && m_CacheDataMap.at(hashValue).GetCacheSize() == cacheSize);
+}
+
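+// Rolling hash over the serialized cache blob, seeded with its size: each byte updates
+// the hash as hash = hash * 31 + byte (the (hash << 5) - hash form below is hash * 31).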
+size_t CacheDataHandler::Hash(std::vector<uint8_t>& cacheData)
+{
+    std::size_t hash = cacheData.size();
+    for (auto& i : cacheData)
+    {
+        hash = ((hash << 5) - hash) + i;
+    }
+    return hash;
+}
+
+size_t CacheDataHandler::GetCacheSize(android::nn::CacheToken token)
+{
+    for (auto i = m_CacheDataMap.begin(); i != m_CacheDataMap.end(); ++i)
+    {
+        if (i->second.GetToken() == token)
+        {
+            return i->second.GetCacheSize();
+        }
+    }
+    return 0;
+}
+
+void CacheDataHandler::Clear()
+{
+    m_CacheDataMap.clear();
+}
+
+} // armnn_driver
diff --git a/shim/sl/canonical/CacheDataHandler.hpp b/shim/sl/canonical/CacheDataHandler.hpp
new file mode 100644
index 0000000..95464a9
--- /dev/null
+++ b/shim/sl/canonical/CacheDataHandler.hpp
@@ -0,0 +1,64 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <vector>
+#include <unordered_map>
+
+#include <nnapi/Types.h>
+
+namespace armnn_driver
+{
+
+class CacheHandle
+{
+public:
+    CacheHandle(const android::nn::CacheToken token, const size_t cacheSize)
+    : m_CacheToken(token), m_CacheSize(cacheSize) {}
+
+    ~CacheHandle() {}
+
+    android::nn::CacheToken GetToken() const
+    {
+        return m_CacheToken;
+    }
+
+    size_t GetCacheSize() const
+    {
+        return m_CacheSize;
+    }
+
+private:
+    const android::nn::CacheToken m_CacheToken;
+    const size_t m_CacheSize;
+};
+
+class CacheDataHandler
+{
+public:
+    CacheDataHandler() {}
+    ~CacheDataHandler() {}
+
+    void Register(const android::nn::CacheToken token, const size_t hashValue, const size_t cacheSize);
+
+    bool Validate(const android::nn::CacheToken token, const size_t hashValue, const size_t cacheSize) const;
+
+    size_t Hash(std::vector<uint8_t>& cacheData);
+
+    size_t GetCacheSize(android::nn::CacheToken token);
+
+    void Clear();
+
+private:
+    CacheDataHandler(const CacheDataHandler&) = delete;
+    CacheDataHandler& operator=(const CacheDataHandler&) = delete;
+
+    std::unordered_map<size_t, CacheHandle> m_CacheDataMap;
+};
+
+CacheDataHandler& CacheDataHandlerInstance();
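+
+// Minimal usage sketch (hypothetical variable names): when a network blob is produced for
+// caching, the driver can hash it, register it against the NNAPI cache token, and later
+// validate the (token, hash, size) triple before trusting the cached data:
+//   auto& handler = CacheDataHandlerInstance();
+//   size_t hash   = handler.Hash(serializedData);   // serializedData: std::vector<uint8_t>
+//   handler.Register(token, hash, serializedData.size());
+//   bool trusted  = handler.Validate(token, hash, serializedData.size());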
+
+} // armnn_driver
diff --git a/shim/sl/canonical/CanonicalUtils.cpp b/shim/sl/canonical/CanonicalUtils.cpp
new file mode 100644
index 0000000..713629f
--- /dev/null
+++ b/shim/sl/canonical/CanonicalUtils.cpp
@@ -0,0 +1,611 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#define LOG_TAG "arm-armnn-sl"
+
+#include "CanonicalUtils.hpp"
+
+#include <armnn/Utils.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnnSerializer/ISerializer.hpp>
+#include <armnnUtils/Permute.hpp>
+
+#include <ghc/filesystem.hpp>
+namespace fs = ghc::filesystem;
+#include <half/half.hpp>
+#include <log/log.h>
+
+#include <cassert>
+#include <cerrno>
+#include <cinttypes>
+#include <cstdio>
+#include <sstream>
+#include <time.h>
+#include <variant>
+
+namespace armnn
+{
+using Half = half_float::half; //import half float implementation
+} //namespace armnn
+
+using namespace android;
+using namespace android::nn;
+
+namespace armnn_driver
+{
+const armnn::PermutationVector g_DontPermute{};
+
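+// Usage sketch, assuming ArmNN's convention that mappings[i] gives the destination index
+// of source dimension i: converting NHWC data to NCHW would use the mapping { 0, 2, 3, 1 },
+// e.g.
+//   const armnn::PermutationVector nhwcToNchw({ 0, 2, 3, 1 });
+//   SwizzleAndroidNn4dTensorToArmNn(tensorInfo, srcData, dstData, nhwcToNchw);
+// which permutes both the TensorInfo and the backing data.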
+void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensorInfo,
+                                     const void* input,
+                                     void* output,
+                                     const armnn::PermutationVector& mappings)
+{
+    assert(tensorInfo.GetNumDimensions() == 4U);
+
+    armnn::DataType dataType = tensorInfo.GetDataType();
+    switch (dataType)
+    {
+    case armnn::DataType::Float16:
+    case armnn::DataType::Float32:
+    case armnn::DataType::QAsymmU8:
+    case armnn::DataType::QSymmS8:
+    case armnn::DataType::QAsymmS8:
+        // First swizzle tensor info
+        tensorInfo = armnnUtils::Permuted(tensorInfo, mappings);
+        // Then swizzle tensor data
+        armnnUtils::Permute(tensorInfo.GetShape(), mappings, input, output, armnn::GetDataTypeSize(dataType));
+        break;
+    default:
+        VLOG(DRIVER) << "Unknown armnn::DataType for swizzling";
+        assert(0);
+    }
+}
+
+void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
+{
+    // find the location within the pool
+    assert(location.poolIndex < memPools.size());
+
+    const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
+    uint8_t* memPoolBuffer = memPool.getBuffer();
+    uint8_t* memory = memPoolBuffer + location.offset;
+    return memory;
+}
+
+void* GetMemoryFromPointer(const Request::Argument& requestArg)
+{
+    // get the pointer memory
+    auto ptrMemory = std::visit([](std::variant<const void*, void*>&& memory)
+                                {
+                                    if (std::holds_alternative<const void*>(memory))
+                                    {
+                                        auto ptr = std::get<const void*>(memory);
+                                        auto ptrMemory = static_cast<const uint8_t*>(ptr);
+                                        return const_cast<uint8_t*>(ptrMemory);
+                                    }
+                                    else
+                                    {
+                                        auto ptr = std::get<void*>(memory);
+                                        return static_cast<uint8_t*>(ptr);
+                                    }
+                                }, requestArg.location.pointer);
+    return ptrMemory;
+}
+
+armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand)
+{
+    using namespace armnn;
+    bool perChannel = false;
+    bool isScalar   = false;
+
+    DataType type;
+    switch (operand.type)
+    {
+        case OperandType::TENSOR_BOOL8:
+            type = armnn::DataType::Boolean;
+            break;
+        case OperandType::TENSOR_FLOAT32:
+            type = armnn::DataType::Float32;
+            break;
+        case OperandType::TENSOR_FLOAT16:
+            type = armnn::DataType::Float16;
+            break;
+        case OperandType::TENSOR_QUANT8_ASYMM:
+            type = armnn::DataType::QAsymmU8;
+            break;
+        case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
+            perChannel=true;
+            ARMNN_FALLTHROUGH;
+        case OperandType::TENSOR_QUANT8_SYMM:
+            type = armnn::DataType::QSymmS8;
+            break;
+        case OperandType::TENSOR_QUANT16_SYMM:
+            type = armnn::DataType::QSymmS16;
+            break;
+        case OperandType::TENSOR_INT32:
+            type = armnn::DataType::Signed32;
+            break;
+        case OperandType::INT32:
+            type = armnn::DataType::Signed32;
+            isScalar = true;
+            break;
+        case OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
+            type = armnn::DataType::QAsymmS8;
+            break;
+        default:
+            throw UnsupportedOperand<OperandType>(operand.type);
+    }
+
+    TensorInfo ret;
+    if (isScalar)
+    {
+        ret = TensorInfo(TensorShape(armnn::Dimensionality::Scalar), type);
+    }
+    else
+    {
+        if (operand.dimensions.size() == 0)
+        {
+            TensorShape tensorShape(Dimensionality::NotSpecified);
+            ret = TensorInfo(tensorShape, type);
+        }
+        else
+        {
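+            // NNAPI reports a dimension as 0 when its size is not specified, so mark such
+            // dimensions as unspecified in the ArmNN TensorShape.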
+            bool dimensionsSpecificity[5] = { true, true, true, true, true };
+            int count = 0;
+            std::for_each(operand.dimensions.data(),
+                          operand.dimensions.data() +  operand.dimensions.size(),
+                          [&](const unsigned int val)
+                          {
+                              if (val == 0)
+                              {
+                                  dimensionsSpecificity[count] = false;
+                              }
+                              count++;
+                          });
+
+            TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity);
+            ret = TensorInfo(tensorShape, type);
+        }
+    }
+
+    if (perChannel)
+    {
+        // ExtraParams is expected to be of type channelQuant
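+        // For example, a per-channel quantized convolution weight typically carries one
+        // scale per output channel, with channelDim identifying that axis.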
+        const auto& perAxisQuantParams = std::get<Operand::SymmPerChannelQuantParams>(operand.extraParams);
+
+        ret.SetQuantizationScales(perAxisQuantParams.scales);
+        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
+    }
+    else
+    {
+        ret.SetQuantizationScale(operand.scale);
+        ret.SetQuantizationOffset(operand.zeroPoint);
+    }
+    return ret;
+}
+
+std::string GetOperandSummary(const Operand& operand)
+{
+    std::stringstream ss;
+    ss << "operand dimensions: [ ";
+    for (unsigned int i = 0; i < operand.dimensions.size(); ++i)
+    {
+        ss << operand.dimensions[i] << " ";
+    }
+    ss << "] operand type: " << operand.type;
+    return ss.str();
+}
+
+using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
+                                     unsigned int elementIndex,
+                                     std::ofstream& fileStream);
+
+namespace
+{
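+// PrintableType allows narrow integer types (e.g. uint8_t / int8_t) to be widened before
+// streaming, so their values are written as numbers rather than as raw characters.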
+template <typename ElementType, typename PrintableType = ElementType>
+void DumpTensorElement(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream)
+{
+    const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
+    fileStream << static_cast<PrintableType>(elements[elementIndex]) << " ";
+}
+
+} // namespace
+
+void DumpTensor(const std::string& dumpDir,
+                const std::string& requestName,
+                const std::string& tensorName,
+                const armnn::ConstTensor& tensor)
+{
+    // The dump directory must exist in advance.
+    fs::path dumpPath = dumpDir;
+    const fs::path fileName = dumpPath / (requestName + "_" + tensorName + ".dump");
+
+    std::ofstream fileStream;
+    fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);
+
+    if (!fileStream.good())
+    {
+        VLOG(DRIVER) << "Could not open file %s for writing" << fileName.c_str();
+        return;
+    }
+
+    DumpElementFunction dumpElementFunction = nullptr;
+
+    switch (tensor.GetDataType())
+    {
+        case armnn::DataType::Float32:
+        {
+            dumpElementFunction = &DumpTensorElement<float>;
+            break;
+        }
+        case armnn::DataType::QAsymmU8:
+        {
+            dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
+            break;
+        }
+        case armnn::DataType::Signed32:
+        {
+            dumpElementFunction = &DumpTensorElement<int32_t>;
+            break;
+        }
+        case armnn::DataType::Float16:
+        {
+            dumpElementFunction = &DumpTensorElement<armnn::Half>;
+            break;
+        }
+        case armnn::DataType::QAsymmS8:
+        {
+            dumpElementFunction = &DumpTensorElement<int8_t, int32_t>;
+            break;
+        }
+        case armnn::DataType::Boolean:
+        {
+            dumpElementFunction = &DumpTensorElement<bool>;
+            break;
+        }
+        default:
+        {
+            dumpElementFunction = nullptr;
+        }
+    }
+
+    if (dumpElementFunction != nullptr)
+    {
+        const unsigned int numDimensions = tensor.GetNumDimensions();
+        const armnn::TensorShape shape = tensor.GetShape();
+
+        if (!shape.AreAllDimensionsSpecified())
+        {
+            fileStream << "Cannot dump tensor elements: not all dimensions are specified" << std::endl;
+            return;
+        }
+        fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;
+
+        if (numDimensions == 0)
+        {
+            fileStream << "# Shape []" << std::endl;
+            return;
+        }
+        fileStream << "# Shape [" << shape[0];
+        for (unsigned int d = 1; d < numDimensions; ++d)
+        {
+            fileStream << "," << shape[d];
+        }
+        fileStream << "]" << std::endl;
+        fileStream << "Each line contains the data of each of the elements of dimension0. In NCHW and NHWC, each line"
+                      " will be a batch" << std::endl << std::endl;
+
+        // Split will create a new line after all elements of the first dimension
+        // (in a 4, 3, 2, 3 tensor, there will be 4 lines of 18 elements)
+        unsigned int split = 1;
+        if (numDimensions == 1)
+        {
+            split = shape[0];
+        }
+        else
+        {
+            for (unsigned int i = 1; i < numDimensions; ++i)
+            {
+                split *= shape[i];
+            }
+        }
+
+        // Print all elements in the tensor
+        for (unsigned int elementIndex = 0; elementIndex < tensor.GetNumElements(); ++elementIndex)
+        {
+            (*dumpElementFunction)(tensor, elementIndex, fileStream);
+
+            if ( (elementIndex + 1) % split == 0 )
+            {
+                fileStream << std::endl;
+            }
+        }
+        fileStream << std::endl;
+    }
+    else
+    {
+        fileStream << "Cannot dump tensor elements: Unsupported data type "
+            << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
+    }
+
+    if (!fileStream.good())
+    {
+        VLOG(DRIVER) << "An error occurred when writing to file %s" << fileName.c_str();
+    }
+}
+
+void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
+                                 const std::string& dumpDir,
+                                 armnn::NetworkId networkId,
+                                 const armnn::IProfiler* profiler)
+{
+    // Check if profiling is required.
+    if (!gpuProfilingEnabled)
+    {
+        return;
+    }
+
+    // The dump directory must exist in advance.
+    if (dumpDir.empty())
+    {
+        return;
+    }
+
+    ARMNN_ASSERT(profiler);
+
+    // Set the name of the output profiling file.
+    fs::path dumpPath = dumpDir;
+    const fs::path fileName = dumpPath / (std::to_string(networkId) + "_profiling.json");
+
+    // Open the output file for writing.
+    std::ofstream fileStream;
+    fileStream.open(fileName.c_str(), std::ofstream::out | std::ofstream::trunc);
+
+    if (!fileStream.good())
+    {
+        VLOG(DRIVER) << "Could not open file %s for writing" << fileName.c_str();
+        return;
+    }
+
+    // Write the profiling info to a JSON file.
+    profiler->Print(fileStream);
+}
+
+std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
+                                        const std::string& dumpDir)
+{
+    std::string fileName;
+    // The dump directory must exist in advance.
+    if (dumpDir.empty())
+    {
+        return fileName;
+    }
+
+    std::string timestamp = GetFileTimestamp();
+    if (timestamp.empty())
+    {
+        return fileName;
+    }
+
+    // Set the name of the output .dot file.
+    fs::path dumpPath = dumpDir;
+    fs::path tempFilePath = dumpPath / (timestamp + "_networkgraph.dot");
+    fileName = tempFilePath.string();
+
+    VLOG(DRIVER) << "Exporting the optimized network graph to file: %s" << fileName.c_str();
+
+    // Write the network graph to a dot file.
+    std::ofstream fileStream;
+    fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
+
+    if (!fileStream.good())
+    {
+        VLOG(DRIVER) << "Could not open file %s for writing" << fileName.c_str();
+        return fileName;
+    }
+
+    if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
+    {
+        VLOG(DRIVER) << "An error occurred when writing to file %s" << fileName.c_str();
+    }
+    return fileName;
+}
+
+std::string SerializeNetwork(const armnn::INetwork& network,
+                             const std::string& dumpDir,
+                             std::vector<uint8_t>& dataCacheData,
+                             bool dataCachingActive)
+{
+    std::string fileName;
+    bool bSerializeToFile = true;
+    if (dumpDir.empty())
+    {
+        bSerializeToFile = false;
+    }
+    else
+    {
+        std::string timestamp = GetFileTimestamp();
+        if (timestamp.empty())
+        {
+            bSerializeToFile = false;
+        }
+    }
+    if (!bSerializeToFile && !dataCachingActive)
+    {
+        return fileName;
+    }
+
+    auto serializer(armnnSerializer::ISerializer::Create());
+    // Serialize the Network
+    serializer->Serialize(network);
+    if (dataCachingActive)
+    {
+        std::stringstream stream;
+        auto serialized = serializer->SaveSerializedToStream(stream);
+        if (serialized)
+        {
+            std::string const serializedString{stream.str()};
+            std::copy(serializedString.begin(),
+                      serializedString.end(),
+                      std::back_inserter(dataCacheData));
+        }
+    }
+
+    if (bSerializeToFile)
+    {
+        // Set the name of the output .armnn file.
+        fs::path dumpPath = dumpDir;
+        std::string timestamp = GetFileTimestamp();
+        fs::path tempFilePath = dumpPath / (timestamp + "_network.armnn");
+        fileName = tempFilePath.string();
+
+        // Save serialized network to a file
+        std::ofstream serializedFile(fileName, std::ios::out | std::ios::binary);
+        auto serialized = serializer->SaveSerializedToStream(serializedFile);
+        if (!serialized)
+        {
+            VLOG(DRIVER) << "An error occurred when serializing to file %s" << fileName.c_str();
+        }
+    }
+    return fileName;
+}
+
+bool IsDynamicTensor(const armnn::TensorInfo& tensorInfo)
+{
+    if (tensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
+    {
+        return true;
+    }
+    // Account for the usage of the TensorShape empty constructor
+    if (tensorInfo.GetNumDimensions() == 0)
+    {
+        return true;
+    }
+    return !tensorInfo.GetShape().AreAllDimensionsSpecified();
+}
+
+bool AreDynamicTensorsSupported() //TODO
+{
+    return true;
+}
+
+bool isQuantizedOperand(const OperandType& operandType)
+{
+    if (operandType == OperandType::TENSOR_QUANT8_ASYMM ||
+        operandType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
+        operandType == OperandType::TENSOR_QUANT8_SYMM ||
+        operandType == OperandType::TENSOR_QUANT16_SYMM ||
+        operandType == OperandType::TENSOR_QUANT8_ASYMM_SIGNED)
+    {
+        return true;
+    }
+    else
+    {
+        return false;
+    }
+}
+
+std::string GetModelSummary(const Model& model)
+{
+    std::stringstream result;
+
+    result << model.main.inputIndexes.size() << " input(s), "
+           << model.main.operations.size() << " operation(s), "
+           << model.main.outputIndexes.size() << " output(s), "
+           << model.main.operands.size() << " operand(s) "
+           << std::endl;
+
+    result << "Inputs: ";
+    for (uint32_t i = 0; i < model.main.inputIndexes.size(); i++)
+    {
+        result << GetOperandSummary(model.main.operands[model.main.inputIndexes[i]]) << ", ";
+    }
+    result << std::endl;
+
+    result << "Operations: ";
+    for (uint32_t i = 0; i < model.main.operations.size(); i++)
+    {
+        result << model.main.operations[i].type << ", ";
+    }
+    result << std::endl;
+
+    result << "Outputs: ";
+    for (uint32_t i = 0; i < model.main.outputIndexes.size(); i++)
+    {
+        result << GetOperandSummary(model.main.operands[model.main.outputIndexes[i]]) << ", ";
+    }
+    result << std::endl;
+
+    return result.str();
+}
+
+std::string GetFileTimestamp()
+{
+    // used to get a timestamp to name diagnostic files (the ArmNN serialized graph
+    // and getSupportedOperations.txt files)
+    timespec ts;
+    int iRet = clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
+    std::stringstream ss;
+    if (iRet == 0)
+    {
+        ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec);
+    }
+    else
+    {
+        VLOG(DRIVER) << "clock_gettime failed with errno " <<
+            std::to_string(errno).c_str() << " : " <<
+            std::strerror(errno);
+    }
+    return ss.str();
+}
+
+void RenameExportedFiles(const std::string& existingSerializedFileName,
+                         const std::string& existingDotFileName,
+                         const std::string& dumpDir,
+                         const armnn::NetworkId networkId)
+{
+    if (dumpDir.empty())
+    {
+        return;
+    }
+    RenameFile(existingSerializedFileName, std::string("_network.armnn"), dumpDir, networkId);
+    RenameFile(existingDotFileName, std::string("_networkgraph.dot"), dumpDir, networkId);
+}
+
+void RenameFile(const std::string& existingName,
+                const std::string& extension,
+                const std::string& dumpDir,
+                const armnn::NetworkId networkId)
+{
+    if (existingName.empty() || dumpDir.empty())
+    {
+        return;
+    }
+
+    fs::path dumpPath = dumpDir;
+    const fs::path newFileName = dumpPath / (std::to_string(networkId) + extension);
+    int iRet = rename(existingName.c_str(), newFileName.c_str());
+    if (iRet != 0)
+    {
+        std::stringstream ss;
+        ss << "rename of [" << existingName << "] to [" << newFileName << "] failed with errno "
+           << std::to_string(errno) << " : " << std::strerror(errno);
+        VLOG(DRIVER) << ss.str().c_str();
+    }
+}
+
+void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
+{
+    // Commit output buffers.
+    // Note that we update *all* pools, even if they aren't actually used as outputs -
+    // this is simpler and is what the CpuExecutor does.
+    for (auto& pool : memPools)
+    {
+        // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where
+        // update() has been removed and flush() added.
+        pool.flush();
+    }
+}
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/CanonicalUtils.hpp b/shim/sl/canonical/CanonicalUtils.hpp
new file mode 100644
index 0000000..a509684
--- /dev/null
+++ b/shim/sl/canonical/CanonicalUtils.hpp
@@ -0,0 +1,123 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+#include <armnn/ArmNN.hpp>
+
+#include <CpuExecutor.h>
+#include <nnapi/OperandTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <vector>
+#include <string>
+#include <fstream>
+#include <iomanip>
+
+namespace armnn_driver
+{
+
+using namespace android::nn;
+
+extern const armnn::PermutationVector g_DontPermute;
+
+template <typename OperandType>
+class UnsupportedOperand: public std::runtime_error
+{
+public:
+    UnsupportedOperand(const OperandType type)
+        : std::runtime_error("Operand type is unsupported")
+        , m_type(type)
+    {}
+
+    OperandType m_type;
+};
+
+/// Swizzles tensor data in @a input according to the dimension mappings.
+void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensor,
+                                     const void* input,
+                                     void* output,
+                                     const armnn::PermutationVector& mappings);
+
+/// Returns a pointer to a specific location in a pool
+void* GetMemoryFromPool(DataLocation location,
+                        const std::vector<android::nn::RunTimePoolInfo>& memPools);
+
+void* GetMemoryFromPointer(const Request::Argument& requestArg);
+
+armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand);
+
+std::string GetOperandSummary(const Operand& operand);
+
+bool isQuantizedOperand(const OperandType& operandType);
+
+std::string GetModelSummary(const Model& model);
+
+void DumpTensor(const std::string& dumpDir,
+                const std::string& requestName,
+                const std::string& tensorName,
+                const armnn::ConstTensor& tensor);
+
+void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
+                                 const std::string& dumpDir,
+                                 armnn::NetworkId networkId,
+                                 const armnn::IProfiler* profiler);
+
+std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
+                                        const std::string& dumpDir);
+
+std::string SerializeNetwork(const armnn::INetwork& network,
+                             const std::string& dumpDir,
+                             std::vector<uint8_t>& dataCacheData,
+                             bool dataCachingActive = true);
+
+void RenameExportedFiles(const std::string& existingSerializedFileName,
+                         const std::string& existingDotFileName,
+                         const std::string& dumpDir,
+                         const armnn::NetworkId networkId);
+
+void RenameFile(const std::string& existingName,
+                const std::string& extension,
+                const std::string& dumpDir,
+                const armnn::NetworkId networkId);
+
+/// Checks if a tensor info represents a dynamic tensor
+bool IsDynamicTensor(const armnn::TensorInfo& outputInfo);
+
+/// Checks for ArmNN support of dynamic tensors.
+bool AreDynamicTensorsSupported(void);
+
+std::string GetFileTimestamp();
+
+inline OutputShape ComputeShape(const armnn::TensorInfo& info)
+{
+    OutputShape shape;
+
+    armnn::TensorShape tensorShape = info.GetShape();
+    // Android will expect scalars as a zero dimensional tensor
+    if(tensorShape.GetDimensionality() == armnn::Dimensionality::Scalar)
+    {
+         shape.dimensions = std::vector<uint32_t>{};
+    }
+    else
+    {
+        std::vector<uint32_t> dimensions;
+        const unsigned int numDims = tensorShape.GetNumDimensions();
+        dimensions.resize(numDims);
+        for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx)
+        {
+            dimensions[outputIdx] = tensorShape[outputIdx];
+        }
+        shape.dimensions = dimensions;
+    }
+
+    shape.isSufficient = true;
+
+    return shape;
+}
+
+void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools);
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/ConversionUtils.cpp b/shim/sl/canonical/ConversionUtils.cpp
new file mode 100644
index 0000000..fbc5e28
--- /dev/null
+++ b/shim/sl/canonical/ConversionUtils.cpp
@@ -0,0 +1,1006 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ConversionUtils.hpp"
+#include <armnnUtils/Permute.hpp>
+
+///
+/// Helper classes
+///
+
+namespace armnn_driver
+{
+
+LayerInputHandle::LayerInputHandle()
+    : m_OutputSlot(nullptr)
+    , m_Valid(false)
+{}
+
+LayerInputHandle::LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo)
+    : m_OutputSlot(outputSlot)
+    , m_Valid(valid)
+    , m_TensorInfo(tensorInfo)
+{}
+
+bool LayerInputHandle::IsValid() const
+{
+    return m_Valid;
+}
+
+void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
+{
+    ARMNN_ASSERT(IsValid());
+    if (m_OutputSlot)
+    {
+        m_OutputSlot->Connect(inputSlot);
+    }
+}
+
+void LayerInputHandle::Disconnect(armnn::IInputSlot& inputSlot)
+{
+    ARMNN_ASSERT(IsValid());
+    if (m_OutputSlot)
+    {
+        m_OutputSlot->Disconnect(inputSlot);
+    }
+}
+
+const armnn::TensorInfo& LayerInputHandle::GetTensorInfo() const
+{
+    return m_TensorInfo;
+}
+
+void LayerInputHandle::SanitizeQuantizationScale(LayerInputHandle& weight, LayerInputHandle& input)
+{
+    if (m_OutputSlot)
+    {
+        armnn::TensorInfo weightInfo = weight.GetTensorInfo();
+        armnn::TensorInfo inputInfo = input.GetTensorInfo();
+        armnn::TensorInfo biasInfo = GetTensorInfo();
+
+        SanitizeBiasQuantizationScale(biasInfo, weightInfo, inputInfo);
+
+        m_TensorInfo = biasInfo;
+        m_OutputSlot->SetTensorInfo(biasInfo);
+    }
+}
+
+ConstTensorPin::ConstTensorPin(bool optional)
+    : m_Optional(optional)
+{}
+
+ConstTensorPin::ConstTensorPin(armnn::TensorInfo& tensorInfo,
+                               const void* valueStart,
+                               uint32_t numBytes,
+                               const armnn::PermutationVector& mappings)
+    : m_Optional(false)
+{
+    armnn::IgnoreUnused(numBytes);
+    if (tensorInfo.GetNumBytes() != numBytes)
+    {
+        VLOG(DRIVER) << "The size of ConstTensor does not match its TensorInfo.";
+    }
+
+    const bool needsSwizzling = (mappings.GetSize() > 0);
+    if (needsSwizzling)
+    {
+        m_SwizzledTensorData.resize(tensorInfo.GetNumBytes());
+        SwizzleAndroidNn4dTensorToArmNn(tensorInfo, valueStart, m_SwizzledTensorData.data(), mappings);
+
+        m_ConstTensor = armnn::ConstTensor(tensorInfo, m_SwizzledTensorData.data());
+    }
+    else
+    {
+        m_ConstTensor = armnn::ConstTensor(tensorInfo, valueStart);
+    }
+}
+
+bool ConstTensorPin::IsValid() const
+{
+    return m_ConstTensor.GetMemoryArea() != nullptr;
+}
+
+bool ConstTensorPin::IsOptional() const
+{
+    return m_Optional;
+}
+
+const armnn::ConstTensor& ConstTensorPin::GetConstTensor() const
+{
+    return m_ConstTensor;
+}
+
+const armnn::ConstTensor* ConstTensorPin::GetConstTensorPtr() const
+{
+    if (IsValid() && m_ConstTensor.GetNumElements() > 0)
+    {
+        return &m_ConstTensor;
+    }
+    // tensor is either invalid, or has no elements (indicating an optional tensor that was not provided)
+    return nullptr;
+}
+
+///
+/// Utility functions
+///
+
+bool IsWeightsValid(const Operation& operation,
+                    uint32_t inputIndex,
+                    const Model& model)
+{
+    const Operand* operand = GetInputOperand(operation, inputIndex, model);
+    if (!operand)
+    {
+        Fail("%s: failed to get input operand %i", __func__, inputIndex);
+        return false;
+    }
+
+    if (operand->lifetime    != OperandLifeTime::CONSTANT_COPY
+        && operand->lifetime != OperandLifeTime::CONSTANT_REFERENCE
+        && operand->lifetime != OperandLifeTime::NO_VALUE)
+    {
+        return false;
+    }
+    return true;
+}
+
+ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
+                                              const Model& model,
+                                              const ConversionData& data,
+                                              const armnn::PermutationVector& dimensionMappings,
+                                              const armnn::TensorShape* overrideTensorShape,
+                                              bool optional)
+{
+    if (!IsOperandTypeSupportedForTensors(operand.type))
+    {
+        VLOG(DRIVER) << __func__ << ": unsupported operand type for tensor" << operand.type;
+        return ConstTensorPin();
+    }
+
+    if (!optional && !IsOperandConstant(operand))
+    {
+        VLOG(DRIVER) << __func__ << ": lifetime for input tensor: r" << operand.lifetime;
+        return ConstTensorPin();
+    }
+
+    const void* const valueStart = GetOperandValueReadOnlyAddress(operand, model, data, optional);
+    if (!valueStart)
+    {
+        if (optional)
+        {
+            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
+            return ConstTensorPin(true);
+        }
+        // mandatory tensor with no values
+        Fail("%s: failed to get operand address", __func__);
+        return ConstTensorPin();
+    }
+
+    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
+
+    // Make sure isConstant flag is set.
+    tensorInfo.SetConstant();
+
+    if (overrideTensorShape != nullptr)
+    {
+        tensorInfo.SetShape(*overrideTensorShape);
+    }
+    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
+}
+
+LayerInputHandle ConvertToLayerInputHandle(const Operation& operation,
+                                           uint32_t inputIndex,
+                                           const Model& model,
+                                           ConversionData& data,
+                                           const armnn::PermutationVector& dimensionMappings)
+{
+
+    const Operand* operand = GetInputOperand(operation, inputIndex, model);
+    if (!operand)
+    {
+        Fail("%s: failed to get input operand %i", __func__, inputIndex);
+        return LayerInputHandle();
+    }
+
+    if (!IsOperandTypeSupportedForTensors(operand->type))
+    {
+        VLOG(DRIVER) << __func__ << ": unsupported operand type for tensor: " << operand->type;
+        return LayerInputHandle();
+    }
+
+    try
+    {
+        armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
+
+        if (IsDynamicTensor(operandTensorInfo))
+        {
+            data.m_DynamicInputsEncountered = true;
+
+            const uint32_t operandIndex = operation.inputs[inputIndex];
+
+            // Check if the dynamic input tensors have been inferred by one of the previous layers
+            // If not we can't support them
+            if (data.m_OutputSlotForOperand.size() > operandIndex && data.m_OutputSlotForOperand[operandIndex])
+            {
+                operandTensorInfo = data.m_OutputSlotForOperand[operandIndex]->GetTensorInfo();
+            }
+            else
+            {
+                Fail("%s: Type 2 dynamic input tensors are not supported", __func__);
+                return LayerInputHandle();
+            }
+        }
+
+        switch (operand->lifetime)
+        {
+            case OperandLifeTime::SUBGRAPH_INPUT:
+            {
+                // NOTE: We must check whether we can support the input tensor on at least one
+                // of the provided backends; otherwise we cannot convert the operation
+                bool isInputSupported = false;
+                FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                           IsInputSupported,
+                                           data.m_Backends,
+                                           isInputSupported,
+                                           operandTensorInfo);
+
+                if (!isInputSupported)
+                {
+                    Fail("%s: unsupported input tensor", __func__);
+                    return LayerInputHandle();
+                }
+
+                [[clang::fallthrough]]; // intentional fallthrough
+            }
+            case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
+            case OperandLifeTime::SUBGRAPH_OUTPUT:
+            {
+                // The tensor is either an operand internal to the model, or a model input.
+                // It can be associated with an ArmNN output slot for an existing layer.
+
+                // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
+                const uint32_t operandIndex = operation.inputs[inputIndex];
+                return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
+            }
+            case OperandLifeTime::CONSTANT_COPY: // intentional fallthrough
+            case OperandLifeTime::POINTER:
+            case OperandLifeTime::CONSTANT_REFERENCE:
+            {
+                // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
+                ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand, model, data, dimensionMappings);
+                if (tensorPin.IsValid())
+                {
+                    bool isSupported = false;
+                    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                               IsConstantSupported,
+                                               data.m_Backends,
+                                               isSupported,
+                                               tensorPin.GetConstTensor().GetInfo());
+                    if (!isSupported)
+                    {
+                        return LayerInputHandle();
+                    }
+
+                    armnn::IConnectableLayer* constantLayer =
+                        data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
+                    armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
+                    outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
+
+                    return LayerInputHandle(true, &outputSlot, operandTensorInfo);
+                }
+                else
+                {
+                    Fail("%s: invalid operand tensor", __func__);
+                    return LayerInputHandle();
+                }
+                break;
+            }
+            default:
+            {
+                VLOG(DRIVER) << __func__ << ": unsupported lifetime for input tensor: " << operand->lifetime;
+                return LayerInputHandle();
+            }
+        }
+    }
+    catch (UnsupportedOperand<OperandType>& e)
+    {
+        VLOG(DRIVER) << __func__ << ": Operand type: " << e.m_type << " not supported in ArmnnDriver";
+        return LayerInputHandle();
+    }
+}
+
+bool ConvertPaddings(const Operation& operation,
+                     const Model& model,
+                     ConversionData& data,
+                     unsigned int rank,
+                     armnn::PadDescriptor& padDescriptor)
+{
+    const Operand* paddingsOperand = GetInputOperand(operation, 1, model);
+    if (!paddingsOperand)
+    {
+        return Fail("%s: Could not read paddings operand", __func__);
+    }
+
+    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
+    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
+    {
+        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]",  __func__, rank);
+    }
+
+    std::vector<int32_t> paddings;
+    if (!GetTensorInt32Values(*paddingsOperand, paddings, model, data))
+    {
+        return Fail("%s: Operation has invalid or unsupported paddings operand", __func__);
+    }
+
+    // add padding for each dimension of input tensor.
+    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
+    {
+        int paddingBeforeInput = paddings[i];
+        int paddingAfterInput  = paddings[i + 1];
+
+        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
+        {
+            return Fail("%s: Operation has invalid paddings operand, invalid padding values.",  __func__);
+        }
+
+        padDescriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
+    }
+
+    return true;
+}
+
+
+bool ConvertPooling2d(const Operation& operation,
+                      const char* operationName,
+                      armnn::PoolingAlgorithm poolType,
+                      const Model& model,
+                      ConversionData& data)
+{
+
+    VLOG(DRIVER) << "Converter::ConvertL2Pool2d()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation Could not read input 0", operationName);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    armnn::Pooling2dDescriptor desc;
+    desc.m_PoolType = poolType;
+    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+    desc.m_DataLayout = armnn::DataLayout::NHWC;
+
+    ActivationFn activation;
+
+    auto inputSize = operation.inputs.size();
+
+    if (inputSize >= 10)
+    {
+        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
+        if (!GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft, model, data) ||
+            !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight, model, data) ||
+            !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop, model, data) ||
+            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom, model, data) ||
+            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX, model, data) ||
+            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY, model, data) ||
+            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth, model, data) ||
+            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight, model, data) ||
+            !GetInputActivationFunction(operation, 9, activation, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs", operationName);
+        }
+
+        if (Is12OrLaterOperand(*output))
+        {
+            desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
+        }
+    }
+    else
+    {
+        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
+        ::android::nn::PaddingScheme scheme;
+        if (!GetInputPaddingScheme(operation, 1, scheme, model, data) ||
+            !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX, model, data) ||
+            !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY, model, data) ||
+            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth, model, data) ||
+            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight, model, data) ||
+            !GetInputActivationFunction(operation, 6, activation, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs", operationName);
+        }
+
+        if (Is12OrLaterOperand(*output))
+        {
+            desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
+        }
+
+        const armnnUtils::DataLayoutIndexed dataLayout(desc.m_DataLayout);
+        const unsigned int inputWidth  = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
+        const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
+
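+        // Rough sketch of the implicit (SAME) padding arithmetic, with hypothetical values:
+        // inputWidth = 5, poolWidth = 3, strideX = 2 gives outputWidth = ceil(5 / 2) = 3 and
+        // totalPad = max((3 - 1) * 2 + 3 - 5, 0) = 2, split as padLeft = 1, padRight = 1.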
+        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
+        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
+    }
+
+    bool isSupported = false;
+
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsPooling2dSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   desc);
+
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
+    if (!pooling2dLayer)
+    {
+        return Fail("%s: AddPooling2dLayer failed", __func__);
+    }
+
+    input.Connect(pooling2dLayer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *pooling2dLayer, model,
+                                        data, nullptr, validateFunc, activation);
+}
+
+bool ConvertReduce(const Operation& operation,
+                   const Model& model,
+                   ConversionData& data,
+                   armnn::ReduceOperation reduceOperation)
+{
+    armnn::ReduceDescriptor descriptor;
+    descriptor.m_ReduceOperation = reduceOperation;
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    const Operand* axisOperand = GetInputOperand(operation, 1, model);
+    if (!axisOperand)
+    {
+        return Fail("%s: Could not read input 1", __func__);
+    }
+    std::vector<int32_t> axis;
+    if (!GetTensorInt32Values(*axisOperand, axis, model, data))
+    {
+        return Fail("%s: Input 1 has invalid values", __func__);
+    }
+
+    // Convert the axis to unsigned int and remove duplicates.
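+    // Negative axis values count from the back, e.g. an axis of -1 maps to (rank - 1)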
+    unsigned int rank = inputInfo.GetNumDimensions();
+    std::set<unsigned int> uniqueAxis;
+    std::transform(axis.begin(), axis.end(),
+                   std::inserter(uniqueAxis, uniqueAxis.begin()),
+                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
+    descriptor.m_vAxis.assign(uniqueAxis.begin(), uniqueAxis.end());
+
+    // Get the "keep dims" flag.
+    if (!GetInputScalar(operation, 2, OperandType::BOOL, descriptor.m_KeepDims, model, data))
+    {
+        return Fail("%s: Could not read input 2", __func__);
+    }
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsReduceSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddReduceLayer(descriptor);
+    ARMNN_ASSERT(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+
+bool ConvertToActivation(const Operation& operation,
+                         const char* operationName,
+                         const armnn::ActivationDescriptor& activationDesc,
+                         const Model& model,
+                         ConversionData& data)
+{
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Input 0 is invalid", operationName);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
+
+    bool isSupported = false;
+
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsActivationSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input.GetTensorInfo(),
+                                   outInfo,
+                                   activationDesc);
+    };
+
+    if(IsDynamicTensor(outInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
+    ARMNN_ASSERT(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+DequantizeResult DequantizeIfRequired(size_t operand_index,
+                                      const Operation& operation,
+                                      const Model& model,
+                                      const ConversionData& data)
+{
+    const Operand* weightsOperand = GetInputOperand(operation, operand_index, model);
+    if (!weightsOperand)
+    {
+        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
+    }
+
+    if (IsOperandConstant(*weightsOperand))
+    {
+        // Weights are already constant
+        return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
+    }
+
+    const size_t weightsInputIndex = operation.inputs[operand_index];
+
+    // The weights are a non const tensor, this indicates they might be the output of a dequantize op.
+    // Iterate over the nodes and find the previous operation which should be DEQUANTIZE
+    for (uint32_t operationIdx = 0; operationIdx < getMainModel(model).operations.size(); ++operationIdx)
+    {
+        // Search for the DEQUANTIZE op which has the operand with index equal to operandIndex
+        const auto& operationIt = getMainModel(model).operations[operationIdx];
+        if (operationIt.type != OperationType::DEQUANTIZE)
+        {
+            continue;
+        }
+
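+        // Check whether one of this DEQUANTIZE op's outputs feeds the weights input being resolved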
+        size_t outOpIndex = weightsInputIndex + 1;
+        for (size_t i = 0; outOpIndex != weightsInputIndex && i < operationIt.outputs.size(); ++i)
+        {
+            outOpIndex = operationIt.outputs[i];
+        }
+
+        if (outOpIndex != weightsInputIndex)
+        {
+            continue;
+        }
+
+        const Operand* operand = GetInputOperand(operationIt, 0, model);
+        ARMNN_ASSERT(operand);
+
+        if (!IsQSymm8(*operand))
+        {
+            // Only supporting dequantize from QSYMM8 to FLOAT
+            break;
+        }
+
+        // Allocate a new buffer for the dequantized data and manually dequantize
+        const void* startValue = GetOperandValueReadOnlyAddress(*operand, model, data);
+        if (!startValue)
+        {
+            // Failed to get the operand address
+            break;
+        }
+
+        const uint8_t* quantizedBuffer = reinterpret_cast<const uint8_t*>(startValue);
+        size_t dequantizedBufferLength = operand->location.length;
+        const float quantizationScale  = operand->scale;
+
+        auto dequantizedBuffer = std::make_unique<float[]>(dequantizedBufferLength + 1);
+        // The destination pointer must advance across iterations, so initialise it once before the loop
+        float* dstPtr = dequantizedBuffer.get();
+        ARMNN_ASSERT(dstPtr);
+        for (size_t i = 0; i < dequantizedBufferLength; ++i)
+        {
+            *dstPtr++ = quantizedBuffer[i] * quantizationScale;
+        }
+
+        // Construct tensor info for dequantized ConstTensor
+        armnn::TensorInfo tensorInfo(operand->dimensions.size(),
+                                     operand->dimensions.data(),
+                                     armnn::DataType::Float32);
+
+        return { std::move(dequantizedBuffer), dequantizedBufferLength * sizeof(float),
+                 std::move(tensorInfo),
+                 DequantizeStatus::SUCCESS };
+    }
+
+    return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::NOT_REQUIRED };
+}
+
+ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation& operation,
+                                               const Model& model,
+                                               const ConversionData& data,
+                                               size_t operandIndex,
+                                               bool optional)
+{
+    DequantizeResult dequantized = DequantizeIfRequired(operandIndex, operation, model, data);
+
+    DequantizeStatus status = std::get<3>(dequantized);
+    switch (status)
+    {
+        case DequantizeStatus::INVALID_OPERAND:
+        {
+            // return invalid const tensor pin
+            return ConstTensorPin();
+        }
+        case DequantizeStatus::NOT_REQUIRED:
+        {
+            return ConvertOperationInputToConstTensorPin(
+                operation, operandIndex, model, data, g_DontPermute, nullptr, optional);
+        }
+        case DequantizeStatus::SUCCESS:
+        default:
+        {
+            return ConstTensorPin(
+                std::get<2>(dequantized), std::get<0>(dequantized).get(), std::get<1>(dequantized), g_DontPermute);
+        }
+    }
+}
+
+bool GetInputPaddingScheme(const Operation& operation,
+                           uint32_t inputIndex,
+                           PaddingScheme& outPaddingScheme,
+                           const Model& model,
+                           const ConversionData& data)
+{
+    int32_t paddingSchemeAsInt;
+    if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt, model, data))
+    {
+        return Fail("%s: failed to get padding scheme input value", __func__);
+    }
+
+    outPaddingScheme = static_cast<::android::nn::PaddingScheme>(paddingSchemeAsInt);
+    return true;
+}
+
+const void* GetOperandValueReadOnlyAddress(const Operand& operand,
+                                           const Model& model,
+                                           const ConversionData& data,
+                                           bool optional)
+{
+    const void* valueStart = nullptr;
+    switch (operand.lifetime)
+    {
+        case OperandLifeTime::CONSTANT_COPY:
+        {
+            valueStart = model.operandValues.data() + operand.location.offset;
+            break;
+        }
+        case OperandLifeTime::POINTER:
+        {
+            // Pointer specified in the model
+            valueStart = std::get<const void*>(operand.location.pointer);
+            break;
+        }
+        case OperandLifeTime::CONSTANT_REFERENCE:
+        {
+            // Constant specified via a Memory object
+            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
+            break;
+        }
+        case OperandLifeTime::NO_VALUE:
+        {
+            // An optional input tensor with no values is not an error so should not register as a fail
+            if (optional)
+            {
+                valueStart = nullptr;
+                break;
+            }
+            [[fallthrough]];
+        }
+        default:
+        {
+            VLOG(DRIVER) << __func__ << ": unsupported/invalid operand lifetime:: " << operand.lifetime;
+            valueStart = nullptr;
+        }
+    }
+
+    return valueStart;
+}
+
+bool GetTensorInt32Values(const Operand& operand,
+                          std::vector<int32_t>& outValues,
+                          const Model& model,
+                          const ConversionData& data)
+{
+    if (operand.type != OperandType::TENSOR_INT32)
+    {
+        VLOG(DRIVER) << __func__ << ": invalid operand type: " << operand.type;
+        return false;
+    }
+
+    const void* startAddress = GetOperandValueReadOnlyAddress(operand, model, data);
+    if (!startAddress)
+    {
+        VLOG(DRIVER) << __func__ << ": failed to get operand address " << operand.type;
+        return false;
+    }
+
+    // Check number of bytes is sensible
+    const uint32_t numBytes = operand.location.length;
+    if (numBytes % sizeof(int32_t) != 0)
+    {
+        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
+                    __func__, numBytes, sizeof(int32_t));
+    }
+
+    outValues.resize(numBytes / sizeof(int32_t));
+    memcpy(outValues.data(), startAddress, numBytes);
+    return true;
+}
+
+armnn::DataLayout OptionalDataLayout(const Operation& operation,
+                                     uint32_t inputIndex,
+                                     const Model& model,
+                                     ConversionData& data)
+{
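+    // The optional layout operand is a BOOL scalar: true selects NCHW; a missing, non-bool or false value falls back to the NHWC default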
+    const Operand* operand = GetInputOperand(operation, inputIndex, model);
+    if (!operand)
+    {
+        return armnn::DataLayout::NHWC;
+    }
+
+    if (!IsBool(*operand))
+    {
+        return armnn::DataLayout::NHWC;
+    }
+
+    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
+    if (!valueAddress)
+    {
+        return armnn::DataLayout::NHWC;
+    }
+
+    if (*(static_cast<const bool*>(valueAddress)))
+    {
+        return armnn::DataLayout::NCHW;
+    }
+    else
+    {
+        return armnn::DataLayout::NHWC;
+    }
+}
+
+armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
+                                            ActivationFn activation,
+                                            armnn::IConnectableLayer* prevLayer,
+                                            ConversionData& data)
+{
+    ARMNN_ASSERT(prevLayer->GetNumOutputSlots() == 1);
+
+    prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+
+    armnn::IConnectableLayer* activationLayer = prevLayer;
+
+    if (activation != ActivationFn::kActivationNone)
+    {
+        armnn::ActivationDescriptor activationDesc;
+        switch (activation)
+        {
+            case ActivationFn::kActivationRelu:
+            {
+                activationDesc.m_Function = armnn::ActivationFunction::ReLu;
+                break;
+            }
+            case ActivationFn::kActivationRelu1:
+            {
+                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
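+                // BoundedReLu clamps the output to [m_B, m_A], so Relu1 bounds to [-1.0, 1.0] (and Relu6 below to [0.0, 6.0])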
+                activationDesc.m_A = 1.0f;
+                activationDesc.m_B = -1.0f;
+                break;
+            }
+            case ActivationFn::kActivationRelu6:
+            {
+                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
+                activationDesc.m_A = 6.0f;
+                break;
+            }
+            case ActivationFn::kActivationSigmoid:
+            {
+                activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
+                break;
+            }
+            case ActivationFn::kActivationTanh:
+            {
+                activationDesc.m_Function = armnn::ActivationFunction::TanH;
+                activationDesc.m_A = 1.0f;
+                activationDesc.m_B = 1.0f;
+                break;
+            }
+            default:
+            {
+                Fail("%s: Invalid activation enum value %i", __func__, activation);
+                return nullptr;
+            }
+        }
+
+        bool isSupported = false;
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsActivationSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   prevLayer->GetOutputSlot(0).GetTensorInfo(),
+                                   tensorInfo,
+                                   activationDesc);
+        if (!isSupported)
+        {
+            return nullptr;
+        }
+
+        activationLayer = data.m_Network->AddActivationLayer(activationDesc);
+
+        prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
+        activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
+    }
+
+    return activationLayer;
+}
+
+bool SetupAndTrackLayerOutputSlot(const Operation& operation,
+                                  uint32_t operationOutputIndex,
+                                  armnn::IConnectableLayer& layer,
+                                  uint32_t layerOutputIndex,
+                                  const Model& model,
+                                  ConversionData& data,
+                                  const armnn::TensorInfo* overrideOutputInfo,
+                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc,
+                                  const ActivationFn& activationFunction,
+                                  bool inferOutputShapes)
+{
+    const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex, model);
+    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
+    {
+        return false;
+    }
+
+    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
+    if (overrideOutputInfo == nullptr)
+    {
+        outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
+    }
+    else
+    {
+        outputSlot.SetTensorInfo(*overrideOutputInfo);
+    }
+
+    bool isSupported = false;
+    if (validateFunc && (IsDynamicTensor(outputSlot.GetTensorInfo()) || inferOutputShapes))
+    {
+        // Type one dynamic tensors require the previous layer's output shape for inference
+        for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
+        {
+            if(!layer.GetInputSlot(inputSlotIndex).GetConnection())
+            {
+                return false;
+            }
+        }
+        // IsTensorInfoSet will infer the dynamic output shape
+        outputSlot.IsTensorInfoSet();
+        // Once the shape is inferred we can validate it
+        validateFunc(outputSlot.GetTensorInfo(), isSupported);
+
+        if(!isSupported)
+        {
+            for (unsigned int inputSlotIndex = 0; inputSlotIndex < layer.GetNumInputSlots(); ++inputSlotIndex)
+            {
+                layer.GetInputSlot(inputSlotIndex).GetConnection()->Disconnect(layer.GetInputSlot(inputSlotIndex));
+            }
+            return false;
+        }
+    }
+
+    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
+
+    if (activationFunction != ActivationFn::kActivationNone)
+    {
+        const armnn::TensorInfo& activationOutputInfo = outputSlot.GetTensorInfo();
+        armnn::IConnectableLayer* const endLayer = ProcessActivation(activationOutputInfo, activationFunction,
+                                                                     &layer, data);
+
+        if (!endLayer)
+        {
+            return Fail("%s: ProcessActivation failed", __func__);
+        }
+
+        armnn::IOutputSlot& activationOutputSlot = endLayer->GetOutputSlot(layerOutputIndex);
+        data.m_OutputSlotForOperand[operandIndex] = &activationOutputSlot;
+    }
+    else
+    {
+        data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
+    }
+
+    return true;
+}
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/ConversionUtils.hpp b/shim/sl/canonical/ConversionUtils.hpp
new file mode 100644
index 0000000..5847d21
--- /dev/null
+++ b/shim/sl/canonical/ConversionUtils.hpp
@@ -0,0 +1,1013 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "CanonicalUtils.hpp"
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/BackendHelper.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
+#include <armnn/utility/NumericCast.hpp>
+
+#include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnnUtils/Transpose.hpp>
+
+#include <ActivationFunctor.h>
+#include <CpuExecutor.h>
+#include <OperationsUtils.h>
+
+#include <armnnUtils/FloatingPointComparison.hpp>
+
+#include <log/log.h>
+#include <vector>
+
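+// Convenience accessor for the main subgraph of a canonical NNAPI Model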
+inline const android::nn::Model::Subgraph& getMainModel(const android::nn::Model& model) { return model.main; }
+
+namespace armnn_driver
+{
+
+///
+/// Helper classes
+///
+
+#include <nnapi/OperandTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+
+using Model                     = ::android::nn::Model;
+using Operand                   = ::android::nn::Operand;
+using OperandLifeTime           = ::android::nn::Operand::LifeTime;
+using OperandType               = ::android::nn::OperandType;
+using Operation                 = ::android::nn::Operation;
+using OperationType             = ::android::nn::OperationType;
+using ErrorStatus               = ::android::nn::ErrorStatus;
+
+struct ConversionData
+{
+    ConversionData(const std::vector<armnn::BackendId>& backends)
+    : m_Backends(backends)
+    , m_Network(nullptr, nullptr)
+    , m_DynamicInputsEncountered(false)
+    {}
+
+    const std::vector<armnn::BackendId>       m_Backends;
+    armnn::INetworkPtr                        m_Network;
+    std::vector<armnn::IOutputSlot*>          m_OutputSlotForOperand;
+    std::vector<::android::nn::RunTimePoolInfo> m_MemPools;
+    bool m_DynamicInputsEncountered;
+};
+
+class LayerInputHandle
+{
+public:
+    LayerInputHandle();
+    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);
+
+    bool IsValid() const;
+
+    void Connect(armnn::IInputSlot& inputSlot);
+
+    void Disconnect(armnn::IInputSlot& inputSlot);
+
+    const armnn::TensorInfo& GetTensorInfo() const;
+
+    void SanitizeQuantizationScale(LayerInputHandle& weight, LayerInputHandle& input);
+
+private:
+    armnn::IOutputSlot* m_OutputSlot;
+    bool                m_Valid;
+    armnn::TensorInfo   m_TensorInfo;
+};
+
+class ConstTensorPin
+{
+public:
+    // Creates an invalid tensor pin (can be used to signal errors)
+    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
+    ConstTensorPin(bool optional = false);
+
+    // @param tensorInfo TensorInfo associated with the tensor.
+    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
+    // the model being converted.
+    // @param numBytes Number of bytes for the tensor data.
+    ConstTensorPin(armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
+                   const armnn::PermutationVector& mappings);
+
+    ConstTensorPin(const ConstTensorPin& other) = delete;
+    ConstTensorPin(ConstTensorPin&& other)      = default;
+
+    bool IsValid() const;
+    bool IsOptional() const;
+
+    const armnn::ConstTensor& GetConstTensor() const;
+    const armnn::ConstTensor* GetConstTensorPtr() const;
+
+private:
+    armnn::ConstTensor m_ConstTensor;
+
+    // Owned memory for swizzled tensor data, only required if the tensor needed
+    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
+    // the pools associated with the model being converted.
+    std::vector<uint8_t> m_SwizzledTensorData;
+
+    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
+    bool m_Optional;
+};
+
+enum class ConversionResult
+{
+    Success,
+    ErrorMappingPools,
+    UnsupportedFeature
+};
+
+} // namespace armnn_driver
+
+///
+/// Utility functions
+///
+
+namespace
+{
+using namespace armnn_driver;
+
+// Convenience function to log the reason for failing to convert a model.
+// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
+template<class... Args>
+static bool Fail(const char* formatStr, Args&&... args)
+{
+    ALOGD(formatStr, std::forward<Args>(args)...);
+    return false;
+}
+
+// Convenience macro to call an Is*Supported function and log the caller name together with the reason for lack of support.
+// Called as: FORWARD_LAYER_SUPPORT_FUNC(__func__, Is*Supported, backends, isSupported, a, b, c, d, e)
+#define FORWARD_LAYER_SUPPORT_FUNC(funcName, func, backends, supported, ...) \
+try \
+{ \
+    for (auto&& backendId : backends) \
+    { \
+        auto layerSupportObject = armnn::GetILayerSupportByBackendId(backendId); \
+        if (layerSupportObject.IsBackendRegistered()) \
+        { \
+            std::string reasonIfUnsupported; \
+            supported = \
+                layerSupportObject.func(__VA_ARGS__, armnn::Optional<std::string&>(reasonIfUnsupported)); \
+            if (supported) \
+            { \
+                break; \
+            } \
+            else \
+            { \
+                if (reasonIfUnsupported.size() > 0) \
+                { \
+                    VLOG(DRIVER) << funcName << ": not supported by armnn: " <<  reasonIfUnsupported.c_str(); \
+                } \
+                else \
+                { \
+                    VLOG(DRIVER) << funcName << ": not supported by armnn"; \
+                } \
+            } \
+        } \
+        else \
+        { \
+            VLOG(DRIVER) << funcName << ": backend not registered: " << backendId.Get().c_str(); \
+        } \
+    } \
+    if (!supported) \
+    { \
+        VLOG(DRIVER) << funcName << ": not supported by any specified backend"; \
+    } \
+} \
+catch (const armnn::InvalidArgumentException &e) \
+{ \
+    throw armnn::InvalidArgumentException(e, "Failed to check layer support", CHECK_LOCATION()); \
+}
+
+inline armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
+{
+    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
+}
+
+// Support within the 1.3 driver for specific tensor data types
+inline bool IsOperandTypeSupportedForTensors(OperandType type)
+{
+    return type == OperandType::BOOL                           ||
+           type == OperandType::TENSOR_BOOL8                   ||
+           type == OperandType::TENSOR_FLOAT16                 ||
+           type == OperandType::TENSOR_FLOAT32                 ||
+           type == OperandType::TENSOR_QUANT8_ASYMM            ||
+           type == OperandType::TENSOR_QUANT8_ASYMM_SIGNED     ||
+           type == OperandType::TENSOR_QUANT8_SYMM             ||
+           type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||
+           type == OperandType::TENSOR_QUANT16_SYMM            ||
+           type == OperandType::TENSOR_INT32;
+}
+
+inline bool IsBool(Operand operand)
+{
+    return operand.type == OperandType::BOOL;
+}
+
+inline bool Is12OrLaterOperand(Operand)
+{
+    return true;
+}
+
+
+template<typename LayerHandleType>
+armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network,
+                                          LayerHandleType& inputLayer,
+                                          armnn::TensorInfo reshapeInfo)
+{
+    armnn::ReshapeDescriptor reshapeDescriptor;
+    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
+
+    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
+    ARMNN_ASSERT(reshapeLayer != nullptr);
+
+    // Attach the input layer to the reshape layer
+    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
+    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
+
+    return *reshapeLayer;
+}
+
+
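+// Flattens an N-dimensional input to the 2D [batchSize, inputSize] shape expected by FullyConnected,
+// e.g. a [1, 2, 3, 4] input with [numUnits, 12] weights is flattened to [2, 12]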
+armnn::TensorShape FlattenFullyConnectedInput(const armnn::TensorShape& inputShape,
+                                              const armnn::TensorShape& weightsShape)
+{
+    if (inputShape.GetNumDimensions() > 2U)
+    {
+        unsigned int totalInputElements = inputShape.GetNumElements();
+        unsigned int inputSize = weightsShape[1];
+
+        unsigned int batchSize = totalInputElements / inputSize;
+
+        if(totalInputElements % batchSize != 0)
+        {
+            throw std::runtime_error("Failed to deduce tensor shape");
+        }
+
+        return armnn::TensorShape({batchSize, inputSize});
+    }
+    else
+    {
+        return inputShape;
+    }
+}
+
+inline bool VerifyFullyConnectedShapes(const armnn::TensorShape& inputShape,
+                                       const armnn::TensorShape& weightsShape,
+                                       const armnn::TensorShape& outputShape,
+                                       bool  transposeWeightMatrix)
+{
+    unsigned int dimIdx = transposeWeightMatrix ? 0 : 1;
+    return (inputShape[0] == outputShape[0] && weightsShape[dimIdx] == outputShape[1]);
+}
+
+bool BroadcastTensor(LayerInputHandle& input0,
+                     LayerInputHandle& input1,
+                     armnn::IConnectableLayer* startLayer,
+                     ConversionData& data)
+{
+    ARMNN_ASSERT(startLayer != nullptr);
+
+    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
+
+    unsigned int inputDimensions0 = inputInfo0.GetNumDimensions();
+    unsigned int inputDimensions1 = inputInfo1.GetNumDimensions();
+
+    if (inputDimensions0 == inputDimensions1)
+    {
+        // The inputs have the same number of dimensions, simply connect them to the given layer as they are
+        input0.Connect(startLayer->GetInputSlot(0));
+        input1.Connect(startLayer->GetInputSlot(1));
+
+        return true;
+    }
+
+    // Since the numbers of dimensions do not match, we need to add degenerate dimensions
+    // to the "smaller" tensor using a reshape, while keeping the order of the inputs.
+
+    unsigned int maxInputDimensions = std::max(inputDimensions0, inputDimensions1);
+    unsigned int sizeDifference = std::abs(armnn::numeric_cast<int>(inputDimensions0) -
+                                           armnn::numeric_cast<int>(inputDimensions1));
+
+    bool input0IsSmaller = inputDimensions0 < inputDimensions1;
+    LayerInputHandle& smallInputHandle = input0IsSmaller ? input0 : input1;
+    const armnn::TensorInfo& smallInfo = smallInputHandle.GetTensorInfo();
+
+    const armnn::TensorShape& smallShape = smallInfo.GetShape();
+    std::vector<unsigned int> reshapedDimensions(maxInputDimensions, 1);
+    for (unsigned int i = sizeDifference; i < maxInputDimensions; i++)
+    {
+        reshapedDimensions[i] = smallShape[i - sizeDifference];
+    }
+
+    armnn::TensorInfo reshapedInfo = smallInfo;
+    reshapedInfo.SetShape(armnn::TensorShape{ armnn::numeric_cast<unsigned int>(reshapedDimensions.size()),
+                                              reshapedDimensions.data() });
+
+    // ReshapeDescriptor that is ignored in the IsReshapeSupported function
+    armnn::ReshapeDescriptor reshapeDescriptor;
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsReshapeSupported,
+                               data.m_Backends,
+                               isSupported,
+                               smallInfo,
+                               reshapedInfo,
+                               reshapeDescriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    ARMNN_ASSERT(data.m_Network != nullptr);
+    armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
+
+    if (input0IsSmaller)
+    {
+        // Input0 is the "smaller" tensor, connect the reshape layer as follows:
+        //
+        //  Input0 Input1
+        //     |     |
+        //  Reshape  |
+        //      \   /
+        //    StartLayer
+
+        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
+        input1.Connect(startLayer->GetInputSlot(1));
+    }
+    else
+    {
+        // Input1 is the "smaller" tensor, connect the reshape layer as follows:
+        //
+        //  Input0 Input1
+        //     |     |
+        //     |  Reshape
+        //      \   /
+        //    StartLayer
+
+        input0.Connect(startLayer->GetInputSlot(0));
+        reshapeLayer.GetOutputSlot(0).Connect(startLayer->GetInputSlot(1));
+    }
+
+    return true;
+}
+
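+// Converts an implicit NNAPI padding scheme (SAME / VALID) into the explicit head and tail padding expected by ArmNN descriptors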
+void CalcPadding(uint32_t input,
+                 uint32_t kernel,
+                 uint32_t stride,
+                 uint32_t& outPadHead,
+                 uint32_t& outPadTail,
+                 PaddingScheme scheme)
+{
+    int32_t padHead;
+    int32_t padTail;
+    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
+    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
+    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
+}
+
+void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t dilation, uint32_t& outPadHead,
+                 uint32_t& outPadTail, ::android::nn::PaddingScheme scheme)
+{
+    int32_t padHead;
+    int32_t padTail;
+    calculateExplicitPadding(input, stride, dilation, kernel, scheme, &padHead, &padTail);
+    outPadHead = armnn::numeric_cast<uint32_t>(padHead);
+    outPadTail = armnn::numeric_cast<uint32_t>(padTail);
+}
+
+inline void CalcPaddingTransposeConv(uint32_t output, uint32_t kernel, int32_t stride, int32_t& outPadHead,
+                              int32_t& outPadTail, ::android::nn::PaddingScheme scheme)
+{
+    calculateExplicitPaddingTransposeConv(output, stride, kernel, scheme, &outPadHead, &outPadTail);
+}
+
+Shape GetOperandShape(const Operand& operand)
+{
+    Shape shape;
+    shape.type = OperandType(operand.type);
+    shape.dimensions = operand.dimensions;
+    shape.scale = operand.scale;
+    shape.offset = operand.zeroPoint;
+    return shape;
+}
+
+
+// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
+// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
+// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
+// user (us, in this case) to ensure they match.
+void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
+                                   const armnn::TensorInfo& weightInfo,
+                                   const armnn::TensorInfo& inputInfo)
+{
+    if (weightInfo.HasPerAxisQuantization())
+    {
+        // NOTE: Bias scale is always set to 0 for per-axis quantization and
+        // it needs to be calculated: scale[i] = input_scale * weight_scale[i]
+        auto UpdateBiasScaleValue = [&inputInfo](float biasScale) -> float
+        {
+            return biasScale * inputInfo.GetQuantizationScale();
+        };
+
+        std::vector<float> biasScales(weightInfo.GetQuantizationScales());
+        std::transform(biasScales.begin(), biasScales.end(), biasScales.begin(), UpdateBiasScaleValue);
+
+        biasInfo.SetQuantizationScales(biasScales);
+        // bias is expected to be a 1d tensor, set qdim=0
+        biasInfo.SetQuantizationDim(0);
+
+        VLOG(DRIVER) << "Bias quantization params have been updated for per-axis quantization";
+    }
+    else
+    {
+        const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
+        if (biasInfo.GetQuantizationScale() != expectedBiasScale)
+        {
+            if (armnnUtils::within_percentage_tolerance(biasInfo.GetQuantizationScale(), expectedBiasScale, 1.0f))
+            {
+                VLOG(DRIVER) << "Bias quantization scale has been modified to match input * weights";
+                biasInfo.SetQuantizationScale(expectedBiasScale);
+            }
+        }
+    }
+}
+
+// 4D Tensor Permutations
+const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
+const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
+const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });
+
+// 3D Permutation Vectors
+const armnn::PermutationVector RotateTensorLeft({ 1U, 2U, 0U });
+const armnn::PermutationVector RotateTensorRight({ 2U, 0U, 1U });
+
+template<typename OSlot>
+armnn::IConnectableLayer& AddTransposeLayer(armnn::INetwork& network, OSlot& input,
+                                            const armnn::PermutationVector& mappings)
+{
+    // Add swizzle layer
+    armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
+
+    ARMNN_ASSERT(layer != nullptr);
+
+    // Connect input to swizzle layer
+    input.Connect(layer->GetInputSlot(0));
+
+    // Setup swizzled output
+    const armnn::TensorInfo outInfo = armnnUtils::TransposeTensorShape(input.GetTensorInfo(), mappings);
+    layer->GetOutputSlot(0).SetTensorInfo(outInfo);
+
+    return *layer;
+}
+
+bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
+                               const armnn::TensorShape & outputShape,
+                               uint32_t concatDim)
+{
+    // Validate the output shape is correct given the input shapes (which have just been validated)
+    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
+    if (outputShape.GetNumDimensions() != numDimensions)
+    {
+        return Fail("%s: Output shape has wrong number of dimensions", __func__);
+    }
+
+    unsigned int outputSizeAlongConcatenatedDimension = 0;
+    for (unsigned int i = 0; i < inputShapes.size(); i++)
+    {
+        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
+    }
+
+    for (unsigned int i = 0; i < numDimensions; ++i)
+    {
+        if (i == concatDim)
+        {
+            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
+            {
+                return Fail(
+                        "%s: Invalid output shape for dimension %d (%d != %d)",
+                        __func__,
+                        i,
+                        outputShape[i],
+                        outputSizeAlongConcatenatedDimension);
+            }
+        }
+        else
+        {
+            if (outputShape[i] != inputShapes[0][i])
+            {
+                return Fail("%s: Invalid output shape", __func__);
+            }
+        }
+    }
+
+    return true;
+}
+
+inline bool RequiresReshape(armnn::TensorShape & inputShape)
+{
+    return inputShape.GetNumDimensions() < 3;
+}
+
+inline void SwizzleInputs(armnn::INetwork& network,
+                          std::vector<LayerInputHandle>& inputs,
+                          std::vector<armnn::TensorShape>& inputShapes,
+                          const armnn::PermutationVector& mapping)
+{
+    if (!mapping.IsEqual(IdentityPermutation4D))
+    {
+        size_t nInputs = inputs.size();
+        for (size_t i=0; i<nInputs; ++i)
+        {
+            // add swizzle layer
+            armnn::IConnectableLayer& swizzleLayer = AddTransposeLayer(network, inputs[i], mapping);
+            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
+            auto& outputInfo = outputSlot.GetTensorInfo();
+            // replace inputs with the swizzled ones
+            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
+            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
+        }
+    }
+}
+
+bool TransposeInputTensors(ConversionData& data,
+                           std::vector<LayerInputHandle>& inputs,
+                           std::vector<armnn::TensorShape>& inputShapes,
+                           const armnn::PermutationVector& mapping)
+{
+    // If we have a IdentityPermutation4D or IdentityPermutation3D then we are not permuting
+    if (!mapping.IsEqual(IdentityPermutation4D) && !mapping.IsEqual(IdentityPermutation3D))
+    {
+        armnn::TensorInfo outputTransposeInfo;
+        size_t nInputs = inputs.size();
+        for (size_t i=0; i<nInputs; ++i)
+        {
+            // check permute layer
+            armnn::TransposeDescriptor transposeDesc;
+            transposeDesc.m_DimMappings = mapping;
+            outputTransposeInfo = armnnUtils::TransposeTensorShape(inputs[i].GetTensorInfo(), mapping);
+
+            bool isSupported = false;
+            FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                       IsTransposeSupported,
+                                       data.m_Backends,
+                                       isSupported,
+                                       inputs[i].GetTensorInfo(),
+                                       outputTransposeInfo,
+                                       transposeDesc);
+            if (!isSupported)
+            {
+                return false;
+            }
+
+        }
+        SwizzleInputs(*data.m_Network, inputs, inputShapes, mapping);
+    }
+    return true;
+}
+
+bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
+                                       int32_t & concatDimension,
+                                       std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
+{
+    bool needPermute = false;
+    ARMNN_ASSERT(numberOfDimensions >= 3);
+
+    // ArmNN uses Compute Library subtensors to perform concatenation
+    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
+    // or along dimension 0 or 2 for a 3-D tensor.
+    if (numberOfDimensions == 4 && concatDimension == 2)
+    {
+        concatDimension = 1;
+        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
+        needPermute = true;
+    }
+    else if (numberOfDimensions == 3 && concatDimension == 1)
+    {
+        concatDimension = 0;
+        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
+        needPermute = true;
+    }
+    // If the tensor is 3-D and the concat dimension is 2 then we don't need to permute but we do need to change the
+    // permutation identity to only have 3 dimensions
+    else if (numberOfDimensions == 3 && concatDimension == 2)
+    {
+        permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
+    }
+    return needPermute;
+}
+
+} // anonymous namespace
+
+namespace armnn_driver
+{
+using namespace android::nn;
+
+/// Creates an ArmNN activation layer and connects it to the given layer, if the
+/// passed in AndroidNN activation function requires so.
+/// @return The end layer of the sequence of layers built for the given AndroidNN
+/// activation function or nullptr if an error occurred (e.g. unsupported activation).
+/// Note that the end layer matches the input layer if no activation is required
+/// (the sequence of layers has length 1).
+armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
+                                            ActivationFn activation,
+                                            armnn::IConnectableLayer* prevLayer,
+                                            ConversionData& data);
+
+
+inline const Operand* GetInputOperand(const Operation& operation,
+                                      uint32_t inputIndex,
+                                      const Model& model,
+                                      bool failOnIndexOutOfBounds = true)
+{
+    if (inputIndex >= operation.inputs.size())
+    {
+        if (failOnIndexOutOfBounds)
+        {
+            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
+        }
+        return nullptr;
+    }
+
+    // Model should have been validated beforehand
+    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
+    return &getMainModel(model).operands[operation.inputs[inputIndex]];
+}
+
+inline const Operand* GetOutputOperand(const Operation& operation,
+                                       uint32_t outputIndex,
+                                       const Model& model)
+{
+    if (outputIndex >= operation.outputs.size())
+    {
+        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
+        return nullptr;
+    }
+
+    // Model should have been validated beforehand
+    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());
+
+    return &getMainModel(model).operands[operation.outputs[outputIndex]];
+}
+
+const void* GetOperandValueReadOnlyAddress(const Operand& operand,
+                                           const Model& model,
+                                           const ConversionData& data,
+                                           bool optional = false);
+
+inline bool GetOperandType(const Operation& operation,
+                           uint32_t inputIndex,
+                           const Model& model,
+                           OperandType& type)
+{
+    const Operand* operand = GetInputOperand(operation, inputIndex, model);
+    if (!operand)
+    {
+        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
+    }
+
+    type = operand->type;
+    return true;
+}
+
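+// Note: POINTER-backed and NO_VALUE operands are treated as constant alongside CONSTANT_COPY and CONSTANT_REFERENCE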
+inline bool IsOperandConstant(const Operand& operand)
+{
+    OperandLifeTime lifetime = operand.lifetime;
+
+    return lifetime == OperandLifeTime::CONSTANT_COPY ||
+           lifetime == OperandLifeTime::CONSTANT_REFERENCE ||
+           lifetime == OperandLifeTime::POINTER ||
+           lifetime == OperandLifeTime::NO_VALUE;
+}
+
+bool IsWeightsValid(const Operation& operation, uint32_t inputIndex, const Model& model);
+
+ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
+                                              const Model& model,
+                                              const ConversionData& data,
+                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
+                                              const armnn::TensorShape* overrideTensorShape = nullptr,
+                                              bool optional = false);
+
+inline ConstTensorPin ConvertOperationInputToConstTensorPin(
+        const Operation& operation,
+        uint32_t inputIndex,
+        const Model& model,
+        const ConversionData& data,
+        const armnn::PermutationVector& dimensionMappings = g_DontPermute,
+        const armnn::TensorShape* overrideTensorShape = nullptr,
+        bool optional = false)
+{
+    const Operand* operand = GetInputOperand(operation, inputIndex, model);
+    if (!operand)
+    {
+        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
+        return ConstTensorPin();
+    }
+    return ConvertOperandToConstTensorPin(*operand,
+                                          model,
+                                          data,
+                                          dimensionMappings,
+                                          overrideTensorShape,
+                                          optional);
+}
+
+template <typename OutputType>
+bool GetInputScalar(const Operation& operation,
+                    uint32_t inputIndex,
+                    OperandType type,
+                    OutputType& outValue,
+                    const Model& model,
+                    const ConversionData& data,
+                    bool optional = false)
+{
+    const Operand* operand = GetInputOperand(operation, inputIndex, model);
+    if (!optional && !operand)
+    {
+        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
+    }
+
+    if (!optional && operand->type != type)
+    {
+        VLOG(DRIVER) << __func__ << ": unexpected operand type: " << operand->type << " should be: " << type;
+        return false;
+    }
+
+    if (!optional && operand->location.length != sizeof(OutputType))
+    {
+        return Fail("%s: incorrect operand location length: %i (should be %i)",
+                    __func__, operand->location.length, sizeof(OutputType));
+    }
+
+    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
+    if (!optional && !valueAddress)
+    {
+        return Fail("%s: failed to get address for operand", __func__);
+    }
+
+    if(!optional)
+    {
+        outValue = *(static_cast<const OutputType*>(valueAddress));
+    }
+
+    return true;
+}
+
+inline bool GetInputInt32(const Operation& operation,
+                          uint32_t inputIndex,
+                          int32_t& outValue,
+                          const Model& model,
+                          const ConversionData& data)
+{
+    return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue, model, data);
+}
+
+inline bool GetInputFloat32(const Operation& operation,
+                            uint32_t inputIndex,
+                            float& outValue,
+                            const Model& model,
+                            const ConversionData& data)
+{
+    return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue, model, data);
+}
+
+inline bool GetInputActivationFunctionImpl(const Operation& operation,
+                                           uint32_t inputIndex,
+                                           OperandType type,
+                                           ActivationFn& outActivationFunction,
+                                           const Model& model,
+                                           const ConversionData& data)
+{
+    if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
+    {
+        VLOG(DRIVER) << __func__ << ": unexpected operand type: " << type
+                     << " should be OperandType::INT32 or OperandType::TENSOR_INT32";
+        return false;
+    }
+
+    int32_t activationFunctionAsInt;
+    if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt, model, data))
+    {
+        return Fail("%s: failed to get activation input value", __func__);
+    }
+    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
+    return true;
+}
+
+inline bool GetInputActivationFunction(const Operation& operation,
+                                       uint32_t inputIndex,
+                                       ActivationFn& outActivationFunction,
+                                       const Model& model,
+                                       const ConversionData& data)
+{
+    return GetInputActivationFunctionImpl(operation,
+                                          inputIndex,
+                                          OperandType::INT32,
+                                          outActivationFunction,
+                                          model,
+                                          data);
+}
+
+inline bool GetInputActivationFunctionFromTensor(const Operation& operation,
+                                                 uint32_t inputIndex,
+                                                 ActivationFn& outActivationFunction,
+                                                 const Model& model,
+                                                 const ConversionData& data)
+{
+    // This only accepts a 1-D tensor of size 1
+    return GetInputActivationFunctionImpl(operation,
+                                          inputIndex,
+                                          OperandType::INT32,
+                                          outActivationFunction,
+                                          model,
+                                          data);
+}
+
+
+inline bool GetOptionalInputActivation(const Operation& operation,
+                                       uint32_t inputIndex,
+                                       ActivationFn& activationFunction,
+                                       const Model& model,
+                                       const ConversionData& data)
+{
+    if (operation.inputs.size() <= inputIndex)
+    {
+        activationFunction = ActivationFn::kActivationNone;
+    }
+    else
+    {
+        if (!GetInputActivationFunction(operation, inputIndex, activationFunction, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs", __func__);
+        }
+    }
+    return true;
+}
+
+template<typename ConvolutionDescriptor>
+bool GetOptionalConvolutionDilationParams(const Operation& operation,
+                                          uint32_t dilationXIndex,
+                                          ConvolutionDescriptor& descriptor,
+                                          const Model& model,
+                                          const ConversionData& data)
+{
+    bool success = true;
+    if (operation.inputs.size() >= dilationXIndex + 2)
+    {
+        success &= GetInputScalar(operation,
+                                  dilationXIndex,
+                                  OperandType::INT32,
+                                  descriptor.m_DilationX,
+                                  model,
+                                  data);
+        success &= GetInputScalar(operation,
+                                  dilationXIndex + 1,
+                                  OperandType::INT32,
+                                  descriptor.m_DilationY,
+                                  model,
+                                  data);
+    }
+
+    return success;
+}
+
+inline bool GetOptionalBool(const Operation& operation,
+                            uint32_t inputIndex,
+                            const Model& model,
+                            const ConversionData& data)
+{
+    const Operand* operand = GetInputOperand(operation, inputIndex, model);
+    if (!operand)
+    {
+        return false;
+    }
+
+    if (!IsBool(*operand))
+    {
+        return false;
+    }
+
+    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
+    if (!valueAddress)
+    {
+        return false;
+    }
+
+    return *(static_cast<const bool*>(valueAddress));
+}
+
+bool GetTensorInt32Values(const Operand& operand,
+                          std::vector<int32_t>& outValues,
+                          const Model& model,
+                          const ConversionData& data);
+
+bool GetInputPaddingScheme(const Operation& operation,
+                           uint32_t inputIndex,
+                           PaddingScheme& outPaddingScheme,
+                           const Model& model,
+                           const ConversionData& data);
+
+LayerInputHandle ConvertToLayerInputHandle(const Operation& operation,
+                                           uint32_t inputIndex,
+                                           const Model& model,
+                                           ConversionData& data,
+                                           const armnn::PermutationVector& dimensionMappings = g_DontPermute);
+
+bool SetupAndTrackLayerOutputSlot(const Operation& operation,
+                                  uint32_t operationOutputIndex,
+                                  armnn::IConnectableLayer& layer,
+                                  uint32_t layerOutputIndex,
+                                  const Model& model,
+                                  ConversionData& data,
+                                  const armnn::TensorInfo* overrideOutputInfo = nullptr,
+                                  const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
+                                  const ActivationFn& activationFunction = ActivationFn::kActivationNone,
+                                  bool inferOutputShapes = false);
+
+armnn::DataLayout OptionalDataLayout(const Operation& operation,
+                                     uint32_t inputIndex,
+                                     const Model& model,
+                                     ConversionData& data);
+
+inline bool SetupAndTrackLayerOutputSlot(
+        const Operation& operation,
+        uint32_t outputIndex,
+        armnn::IConnectableLayer& layer,
+        const Model& model,
+        ConversionData& data,
+        const armnn::TensorInfo* overrideOutputInfo = nullptr,
+        const std::function <void (const armnn::TensorInfo&, bool&)>& validateFunc = nullptr,
+        const ActivationFn& activationFunction = ActivationFn::kActivationNone)
+{
+    return SetupAndTrackLayerOutputSlot(operation,
+                                        outputIndex,
+                                        layer,
+                                        outputIndex,
+                                        model,
+                                        data,
+                                        overrideOutputInfo,
+                                        validateFunc,
+                                        activationFunction);
+}
+
+bool ConvertToActivation(const Operation& operation,
+                         const char* operationName,
+                         const armnn::ActivationDescriptor& activationDesc,
+                         const Model& model,
+                         ConversionData& data);
+
+bool ConvertPaddings(const Operation& operation,
+                     const Model& model,
+                     ConversionData& data,
+                     unsigned int rank,
+                     armnn::PadDescriptor& padDescriptor);
+bool ConvertReduce(const Operation& operation,
+                   const Model& model,
+                   ConversionData& data,
+                   armnn::ReduceOperation reduceOperation);
+
+bool ConvertPooling2d(const Operation& operation,
+                      const char* operationName,
+                      armnn::PoolingAlgorithm poolType,
+                      const Model& model,
+                      ConversionData& data);
+
+inline bool IsQSymm8(const Operand& operand)
+{
+    return operand.type == OperandType::TENSOR_QUANT8_SYMM;
+}
+
+enum class DequantizeStatus
+{
+    SUCCESS,
+    NOT_REQUIRED,
+    INVALID_OPERAND
+};
+
+using DequantizeResult = std::tuple<std::unique_ptr<float[]>, size_t, armnn::TensorInfo, DequantizeStatus>;
+
+DequantizeResult DequantizeIfRequired(size_t operand_index,
+                                      const Operation& operation,
+                                      const Model& model,
+                                      const ConversionData& data);
+
+ConstTensorPin DequantizeAndMakeConstTensorPin(const Operation& operation,
+                                               const Model& model,
+                                               const ConversionData& data,
+                                               size_t operandIndex,
+                                               bool optional = false);
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/Converter.cpp b/shim/sl/canonical/Converter.cpp
new file mode 100644
index 0000000..ade8b4f
--- /dev/null
+++ b/shim/sl/canonical/Converter.cpp
@@ -0,0 +1,5628 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Converter.hpp"
+#include <half/half.hpp>
+#include <armnnUtils/TensorUtils.hpp>
+
+namespace armnn_driver
+{
+
+using namespace android::nn;
+using Half = half_float::half;
+
+namespace
+{
+
+} // anonymous namespace
+
+bool Converter::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
+{
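+    // Dispatch the NNAPI operation to the corresponding Convert* helper;
+    // unsupported operation types are logged and rejected.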
+    switch (operation.type)
+    {
+        case OperationType::ABS:
+            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Abs);
+        case OperationType::ADD:
+            return ConvertAdd(operation, model, data);
+        case OperationType::ARGMAX:
+            return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
+        case OperationType::ARGMIN:
+            return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Min);
+        case OperationType::AVERAGE_POOL_2D:
+            return ConvertAveragePool2d(operation, model, data);
+        case OperationType::BATCH_TO_SPACE_ND:
+            return ConvertBatchToSpaceNd(operation, model, data);
+        case OperationType::CAST:
+            return ConvertCast(operation, model, data);
+        case OperationType::CONCATENATION:
+            return ConvertConcatenation(operation, model, data);
+        case OperationType::CONV_2D:
+            return ConvertConv2d(operation, model, data);
+        case OperationType::DEPTH_TO_SPACE:
+            return ConvertDepthToSpace(operation, model, data);
+        case OperationType::DEPTHWISE_CONV_2D:
+            return ConvertDepthwiseConv2d(operation, model, data);
+        case OperationType::DEQUANTIZE:
+            return ConvertDequantize(operation, model, data);
+        case OperationType::DIV:
+            return ConvertDiv(operation, model, data);
+        case OperationType::ELU:
+            return ConvertElu(operation, model, data);
+        case OperationType::EQUAL:
+            return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
+        case OperationType::EXP:
+            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Exp);
+        case OperationType::EXPAND_DIMS:
+            return ConvertExpandDims(operation, model, data);
+        case OperationType::FILL:
+            return ConvertFill(operation, model, data);
+        case OperationType::FLOOR:
+            return ConvertFloor(operation, model, data);
+        case OperationType::FULLY_CONNECTED:
+            return ConvertFullyConnected(operation, model, data);
+        case OperationType::GATHER:
+            return ConvertGather(operation, model, data);
+        case OperationType::GREATER:
+            return ConvertComparison(operation, model, data, ComparisonOperation::Greater);
+        case OperationType::GREATER_EQUAL:
+            return ConvertComparison(operation, model, data, ComparisonOperation::GreaterOrEqual);
+        case OperationType::GROUPED_CONV_2D:
+            return ConvertGroupedConv2d(operation, model, data);
+        case OperationType::HARD_SWISH:
+            return ConvertHardSwish(operation, model, data);
+        case OperationType::INSTANCE_NORMALIZATION:
+            return ConvertInstanceNormalization(operation, model, data);
+        case OperationType::L2_NORMALIZATION:
+            return ConvertL2Normalization(operation, model, data);
+        case OperationType::L2_POOL_2D:
+            return ConvertL2Pool2d(operation, model, data);
+        case OperationType::LESS:
+            return ConvertComparison(operation, model, data, ComparisonOperation::Less);
+        case OperationType::LESS_EQUAL:
+            return ConvertComparison(operation, model, data, ComparisonOperation::LessOrEqual);
+        case OperationType::LOCAL_RESPONSE_NORMALIZATION:
+            return ConvertLocalResponseNormalization(operation, model, data);
+        case OperationType::LOG:
+            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Log);
+        case OperationType::LOGICAL_AND:
+            return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalAnd);
+        case OperationType::LOGICAL_NOT:
+            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::LogicalNot);
+        case OperationType::LOGICAL_OR:
+            return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalOr);
+        case OperationType::LOGISTIC:
+            return ConvertLogistic(operation, model, data);
+        case OperationType::LOG_SOFTMAX:
+            return ConvertLogSoftmax(operation, model, data);
+        case OperationType::LSTM:
+            return ConvertLstm(operation, model, data);
+        case OperationType::MAX_POOL_2D:
+            return ConvertMaxPool2d(operation, model, data);
+        case OperationType::MAXIMUM:
+            return ConvertMaximum(operation, model, data);
+        case OperationType::MEAN:
+            return ConvertMean(operation, model, data);
+        case OperationType::MINIMUM:
+            return ConvertMinimum(operation, model, data);
+        case OperationType::MUL:
+            return ConvertMul(operation, model, data);
+        case OperationType::NEG:
+            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Neg);
+        case OperationType::NOT_EQUAL:
+            return ConvertComparison(operation, model, data, ComparisonOperation::NotEqual);
+        case OperationType::PAD:
+            return ConvertPad(operation, model, data);
+        case OperationType::PAD_V2:
+            return ConvertPadV2(operation, model, data);
+        case OperationType::PRELU:
+            return ConvertPrelu(operation, model, data);
+        case OperationType::QUANTIZE:
+            return ConvertQuantize(operation, model, data);
+        case OperationType::QUANTIZED_LSTM:
+            return ConvertQuantizedLstm(operation, model, data);
+        case OperationType::QUANTIZED_16BIT_LSTM:
+            return ConvertQuantized16BitLstm(operation, model, data);
+        case OperationType::RANK:
+            return ConvertRank(operation, model, data);
+        case OperationType::REDUCE_MAX:
+            return ConvertReduce(operation, model, data, armnn::ReduceOperation::Max);
+        case OperationType::REDUCE_MIN:
+            return ConvertReduce(operation, model, data, armnn::ReduceOperation::Min);
+        case OperationType::REDUCE_SUM:
+            return ConvertReduce(operation, model, data, armnn::ReduceOperation::Sum);
+        case OperationType::RELU:
+            return ConvertReLu(operation, model, data);
+        case OperationType::RELU1:
+            return ConvertReLu1(operation, model, data);
+        case OperationType::RELU6:
+            return ConvertReLu6(operation, model, data);
+        case OperationType::RESHAPE:
+            return ConvertReshape(operation, model, data);
+        case OperationType::RESIZE_BILINEAR:
+            return ConvertResize(operation, model, data, ResizeMethod::Bilinear);
+        case OperationType::RESIZE_NEAREST_NEIGHBOR:
+            return ConvertResize(operation, model, data, ResizeMethod::NearestNeighbor);
+        case OperationType::RSQRT:
+            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Rsqrt);
+        case OperationType::SIN:
+            return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Sin);
+        case OperationType::SOFTMAX:
+            return ConvertSoftmax(operation, model, data);
+        case OperationType::SPACE_TO_BATCH_ND:
+            return ConvertSpaceToBatchNd(operation, model, data);
+        case OperationType::SPACE_TO_DEPTH:
+            return ConvertSpaceToDepth(operation, model, data);
+        case OperationType::SQRT:
+            return ConvertSqrt(operation, model, data);
+        case OperationType::SQUEEZE:
+            return ConvertSqueeze(operation, model, data);
+        case OperationType::STRIDED_SLICE:
+            return ConvertStridedSlice(operation, model, data);
+        case OperationType::SUB:
+            return ConvertSub(operation, model, data);
+        case OperationType::TRANSPOSE:
+            return ConvertTranspose(operation, model, data);
+        case OperationType::TRANSPOSE_CONV_2D:
+            return ConvertTransposeConv2d(operation, model, data);
+        case OperationType::TANH:
+            return ConvertTanH(operation, model, data);
+        default:
+            VLOG(DRIVER) << "Operation type: " << operation.type << " is not supported in ArmnnDriver";
+            return false;
+    }
+}
+
+bool Converter::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertAdd()";
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2, and it should be optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
+    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsAdditionSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo0,
+                                   inputInfo1,
+                                   outputInfo);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
+
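+    // BroadcastTensor connects both inputs to the layer, inserting a Reshape on the
+    // lower-rank input if the two ranks differ.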
+    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
+    if (!isReshapeSupported)
+    {
+        return false;
+    }
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
+                                        data, nullptr, validateFunc, activationFunction);
+}
+
+bool Converter::ConvertArgMinMax(const Operation& operation,
+                                 const Model& model,
+                                 ConversionData& data,
+                                 armnn::ArgMinMaxFunction argMinMaxFunction)
+{
+    VLOG(DRIVER) << "Converter::ConvertArgMinMax()";
+    VLOG(DRIVER) << "argMinMaxFunction = " << GetArgMinMaxFunctionAsCString(argMinMaxFunction);
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+
+    if (!input0.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    int32_t axis;
+    if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
+    int rank = static_cast<int>(inputInfo.GetNumDimensions());
+
+    if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
+    {
+        // Square brackets denote an inclusive bound, parentheses an exclusive bound.
+        // E.g. a rank 4 tensor can have an axis in range [-4, 4):
+        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
+        return Fail("%s: Axis must be in range [-n, n)", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    armnn::ArgMinMaxDescriptor descriptor;
+    descriptor.m_Function = argMinMaxFunction;
+    descriptor.m_Axis     = axis;
+
+    bool isSupported = false;
+
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsArgMinMaxSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo0,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
+    assert(layer != nullptr);
+
+    input0.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertAveragePool2d()";
+    return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Average, model, data);
+}
+
+bool Converter::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertBatchToSpaceNd()";
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    const Operand* blockOperand = GetInputOperand(operation, 1, model);
+    if (!blockOperand)
+    {
+        return Fail("%s: Could not read input 1", __func__);
+    }
+
+    // Convert the block operand to int32
+    std::vector<int32_t> block;
+    if (!GetTensorInt32Values(*blockOperand, block, model, data))
+    {
+        return Fail("%s: Input 1 has invalid values", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank != 4)
+    {
+        Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
+    }
+
+    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
+    {
+        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
+                    " greater than or equal to 1", __func__);
+    }
+
+    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
+    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
+    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
+
+    if (Is12OrLaterOperand(*output))
+    {
+        batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
+    }
+    // Setting crops to 0,0 0,0 as it is not supported in Android NN API
+    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsBatchToSpaceNdSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   batchToSpaceNdDesc);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertCast(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertCast()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const TensorInfo& inputInfo  = input.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    bool isSupported = false;
+
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsCastSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddCastLayer();
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertComparison(const Operation& operation,
+                                  const Model& model,
+                                  ConversionData& data,
+                                  ComparisonOperation comparisonOperation)
+{
+    VLOG(DRIVER) << "Converter::ConvertComparison()";
+    VLOG(DRIVER) << "comparisonOperation = " << GetComparisonOperationAsCString(comparisonOperation);
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!(input0.IsValid() && input1.IsValid()))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const TensorInfo& inputInfo0 = input0.GetTensorInfo();
+    const TensorInfo& inputInfo1 = input1.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    ComparisonDescriptor descriptor(comparisonOperation);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsComparisonSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo0,
+                                   inputInfo1,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddComparisonLayer(descriptor);
+    assert(layer != nullptr);
+
+    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
+    if (!isReshapeSupported)
+    {
+        return false;
+    }
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        input0.Connect(layer->GetInputSlot(0));
+        input1.Connect(layer->GetInputSlot(1));
+    }
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertConcatenation()";
+
+    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
+    if (operation.inputs.size() <= 1)
+    {
+        return Fail("%s: Operation has insufficient arguments", __func__);
+    }
+
+    // Get inputs and outputs
+    const std::size_t numInputTensors = operation.inputs.size() - 1;
+
+    int32_t concatDim;
+    if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has no outputs", __func__);
+    }
+
+    armnn::TensorInfo  outputInfo      = GetTensorInfoForOperand(*outputOperand);
+    armnn::TensorShape outputShape     = outputInfo.GetShape();
+    const bool         isDynamicTensor = IsDynamicTensor(outputInfo);
+    //
+    // handle negative concat dims along the lines of tensorflow as described here:
+    //    https://www.tensorflow.org/api_docs/python/tf/concat
+    // "negative axis refers to axis + rank(values)-th dimension"
+    //
+    if (concatDim < 0)
+    {
+        concatDim += outputShape.GetNumDimensions();
+    }
+
+    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
+    {
+        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
+    }
+
+    std::vector<LayerInputHandle>   inputHandles;
+    std::vector<armnn::TensorShape> inputShapes;
+
+    inputHandles.reserve(numInputTensors);
+    inputShapes.reserve(numInputTensors);
+
+    bool          inputsHaveBeenReshaped = false;
+    unsigned int  tensorDimensionsAdded  = 0;
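+    // Inputs of rank 1 or 2 are expanded to rank 3 in the loop below; tensorDimensionsAdded
+    // records how many leading dimensions were added so the concat axis and output shape can
+    // be adjusted, and the reshape undone, after the concatenation.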
+    for (uint32_t i = 0; i < numInputTensors; ++i)
+    {
+        const Operand* operand = GetInputOperand(operation, i, model);
+        if (!operand)
+        {
+            return Fail("%s: Operation has invalid inputs", __func__);
+        }
+
+        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);
+        if (!operandInputHandle.IsValid())
+        {
+            return Fail("%s: Operation has invalid inputs", __func__);
+        }
+
+        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
+        if (operandShape.GetNumDimensions() == 0)
+        {
+            return Fail("%s: Operands with rank 0 are not supported", __func__);
+        }
+
+        if (RequiresReshape(operandShape))
+        {
+            inputsHaveBeenReshaped = true;
+
+            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
+
+            // Expand the tensor to three dimensions
+            if (operandShape.GetNumDimensions() == 2)
+            {
+                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
+                tensorDimensionsAdded = 1;
+            }
+            else
+            {
+                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
+                tensorDimensionsAdded = 2;
+            }
+
+            armnn::ReshapeDescriptor reshapeDescriptor;
+            reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
+
+            bool isSupported = false;
+            FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                       IsReshapeSupported,
+                                       data.m_Backends,
+                                       isSupported,
+                                       operandInputHandle.GetTensorInfo(),
+                                       reshapeInfo,
+                                       reshapeDescriptor);
+
+            if (!isSupported)
+            {
+                return false;
+            }
+            armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
+
+            // Point to the reshape operation rather than the input operation
+            operandShape       = reshapeInfo.GetShape();
+            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
+        }
+
+        inputShapes.emplace_back(operandShape);
+        inputHandles.emplace_back(operandInputHandle);
+
+        if (!inputHandles.back().IsValid())
+        {
+            return Fail("%s: Operation has invalid inputs", __func__);
+        }
+    }
+
+    ARMNN_ASSERT(inputShapes.size() == inputHandles.size());
+
+    if (inputsHaveBeenReshaped)
+    {
+        // Adjust the concatenation dimension by the amount of dimensions added (if any)
+        concatDim += tensorDimensionsAdded;
+
+        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
+        if (tensorDimensionsAdded == 1)
+        {
+            if (IsDynamicTensor(outputInfo))
+            {
+                outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
+            }
+            else
+            {
+                outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
+            }
+        }
+        else if (tensorDimensionsAdded == 2)
+        {
+            if (IsDynamicTensor(outputInfo))
+            {
+                outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
+            }
+            else
+            {
+                outputShape = armnn::TensorShape({1, 1, outputShape[0]});
+            }
+        }
+    }
+
+    // Check if permutations is required and get the pair of permutations required for the concatenation.
+    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
+    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
+            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
+    bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
+                                                         concatDim,
+                                                         permutationPair);
+
+    // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
+    if (!isDynamicTensor)
+    {
+        if (needPermute)
+        {
+            outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
+        }
+
+        outputInfo.SetShape(outputShape);
+    }
+    // This is a no-op for identity swizzles; otherwise it replaces both
+    // the handles and shapes with the swizzled layer output handles and shapes.
+    if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
+    {
+        return false;
+    }
+
+    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
+    armnn::OriginsDescriptor concatDescriptor;
+
+    try
+    {
+        // The concat descriptor is always created across the only supported concat dimension
+        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
+        concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
+                                                                   inputShapes.end(),
+                                                                   concatDim);
+    } catch (std::exception& error)
+    {
+        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
+    }
+
+    // Validate the output shape is correct given the input shapes based on the
+    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
+    if (!isDynamicTensor)
+    {
+        if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
+        {
+            return Fail("%s: Error validating the output shape for concat", __func__);
+        }
+    }
+
+    std::vector<const armnn::TensorInfo*> inputTensorInfos;
+    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
+                   [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
+
+    bool isSupported  = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
+        FORWARD_LAYER_SUPPORT_FUNC(__func__, IsConcatSupported, data.m_Backends, isSupported, inputTensorInfos,
+                                   outputInfo, concatDescriptor);
+    };
+
+    if (!isDynamicTensor)
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
+    assert(layer != nullptr);
+    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+    // Connect inputs to the layer
+    const int numInputSlots = layer->GetNumInputSlots();
+    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
+    for (int i = 0; i < numInputSlots; ++i)
+    {
+        // connect the input directly to the merge (concat) layer
+        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
+    }
+
+    // Transpose (deswizzle) the output back to the original layout when a permutation was applied
+    auto transposeOutputShape = [&](){
+        armnn::TransposeDescriptor transposeDesc;
+        transposeDesc.m_DimMappings = permutationPair.second;
+        armnn::TensorInfo inputTransposeInfo  = layer->GetOutputSlot(0).GetTensorInfo();
+        armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
+                                                                                 permutationPair.second);
+        isSupported = false;
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsTransposeSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputTransposeInfo,
+                                   outputTransposeInfo,
+                                   transposeDesc);
+        if (!isSupported)
+        {
+            return false;
+        }
+        // Add permutation layer and connect the output to it, the permutation becomes the output layer
+        armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
+                                                                     permutationPair.second);
+        layer = &deswizzleLayer;
+
+        return true;
+    };
+
+    if (needPermute && !isDynamicTensor)
+    {
+        transposeOutputShape();
+    }
+
+    if (inputsHaveBeenReshaped)
+    {
+        if (isDynamicTensor)
+        {
+            // Infer the output shapes of concat if outputs are type 1 dynamic
+            ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
+            if (!ValidateConcatOutputShape(inputShapes,
+                                           layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
+                                           concatDim))
+            {
+                return Fail("%s: Error validating the output shape for concat", __func__);
+            }
+            transposeOutputShape();
+        }
+
+        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
+        // Undo the reshape knowing the amount of dimensions added
+        if (tensorDimensionsAdded == 1)
+        {
+            afterConcatInfo.SetShape(
+                    armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
+        }
+        else if (tensorDimensionsAdded == 2)
+        {
+            afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
+        }
+
+        armnn::ReshapeDescriptor reshapeDescriptor;
+        reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
+        armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
+
+        isSupported = false;
+        auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
+            FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                       IsReshapeSupported,
+                                       data.m_Backends,
+                                       isSupported,
+                                       concatInfo,
+                                       afterConcatInfo,
+                                       reshapeDescriptor);
+        };
+
+        if (!IsDynamicTensor(afterConcatInfo))
+        {
+            validateReshapeFunc(afterConcatInfo, isSupported);
+        }
+        else
+        {
+            isSupported = AreDynamicTensorsSupported();
+        }
+
+        if (!isSupported)
+        {
+            return false;
+        }
+        layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
+        return SetupAndTrackLayerOutputSlot(operation,
+                                            0,
+                                            *layer,
+                                            model,
+                                            data,
+                                            nullptr,
+                                            validateReshapeFunc);
+    }
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertConv2d()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const TensorInfo& inputInfo  = input.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    Convolution2dDescriptor desc;
+    desc.m_DataLayout = DataLayout::NHWC;
+
+    // Determine whether padding is implicit or explicit
+    bool implicitPadding = operation.inputs.size() == 7
+                            || (operation.inputs.size() >= 8
+                                 && GetInputOperand(operation, 7, model)->type == OperandType::BOOL);
+
+    if (implicitPadding)
+    {
+        desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
+    }
+    else if (operation.inputs.size() >= 10)
+    {
+        desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
+    }
+
+    const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
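+    // Mapping: source dimension i moves to destination dimension OHWIToOIHW[i],
+    // i.e. O->0, H->2, W->3, I->1.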
+
+    // ArmNN does not currently support non-fixed weights or bias
+    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
+    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
+    // the DataLayout is NCHW
+
+    if (!IsWeightsValid(operation, 1, model) && desc.m_DataLayout == DataLayout::NCHW)
+    {
+        return Fail("%s: Operation has unsupported weights OperandLifeTime", __func__);
+    }
+
+    LayerInputHandle weightsInput = (desc.m_DataLayout == DataLayout::NCHW)
+                                      ? ConvertToLayerInputHandle(operation, 1, model, data, OHWIToOIHW)
+                                      : ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!weightsInput.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    LayerInputHandle biasInput = ConvertToLayerInputHandle(operation, 2, model, data); // 1D
+    if (!biasInput.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    biasInput.SanitizeQuantizationScale(weightsInput, input);
+    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
+    armnn::TensorInfo biasInfo    = biasInput.GetTensorInfo();
+
+    ActivationFn activation;
+    if (implicitPadding)
+    {
+        ::android::nn::PaddingScheme paddingScheme;
+        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)
+              || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)
+              || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)
+              || !GetInputActivationFunction(operation, 6, activation, model, data)
+              || !GetOptionalConvolutionDilationParams(operation, 8, desc, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
+        }
+
+        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
+        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
+        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
+        const uint32_t kernelX = weightsInfo.GetShape()[widthIndex];
+        const uint32_t kernelY = weightsInfo.GetShape()[heightIndex];
+        const uint32_t inputX  = inputInfo.GetShape()[widthIndex];
+        const uint32_t inputY  = inputInfo.GetShape()[heightIndex];
+
+        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
+        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
+
+    }
+    else if (operation.inputs.size() >= 10)
+    {
+        // explicit padding
+        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)
+              || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)
+              || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)
+              || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)
+              || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)
+              || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)
+              || !GetInputActivationFunction(operation, 9, activation, model, data)
+              || !GetOptionalConvolutionDilationParams(operation, 11, desc, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
+        }
+    }
+    else
+    {
+        return Fail("%s: Unsupported number of operation inputs", __func__);
+    }
+
+    desc.m_BiasEnabled = true;
+    Optional<TensorInfo> biases(biasInfo);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsConvolution2dSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   desc,
+                                   weightsInfo,
+                                   biases);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
+
+    if (!startLayer)
+    {
+        return Fail("%s: AddConvolution2dLayer failed", __func__);
+    }
+
+    input.Connect(startLayer->GetInputSlot(0));
+    weightsInput.Connect(startLayer->GetInputSlot(1));
+    biasInput.Connect(startLayer->GetInputSlot(2));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
+}
+
+bool Converter::ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertDepthToSpace()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank != 4)
+    {
+        return Fail("%s: Only inputs with rank 4 are supported", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    armnn::DepthToSpaceDescriptor descriptor;
+
+    if (!GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_BlockSize, model, data))
+    {
+        return Fail("%s: Failed to read block size", __func__);
+    }
+    if (descriptor.m_BlockSize <= 1)
+    {
+        return Fail("%s: Block size must be greater than 1", __func__);
+    }
+
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+    if (Is12OrLaterOperand(*output))
+    {
+        descriptor.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
+    }
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsDepthToSpaceSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    // ArmNN does not currently support non-fixed weights or bias
+    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
+    const Operand* weightsOperand = GetInputOperand(operation, 1, model);
+
+    if (!weightsOperand)
+    {
+        return Fail("%s: Could not read weights", __func__);
+    }
+    // Basic sanity check on the weights shape.
+    // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
+    // [1, filter_height, filter_width, depth_out]
+    if (weightsOperand->dimensions[0] != 1)
+    {
+        return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
+    }
+
+    armnn::DepthwiseConvolution2dDescriptor desc;
+    desc.m_DataLayout = armnn::DataLayout::NHWC;
+
+    // Determine whether padding is implicit or explicit
+    bool implicitPadding = operation.inputs.size() == 8
+                           || (operation.inputs.size() >= 9
+                           && GetInputOperand(operation, 8, model)->type == OperandType::BOOL);
+
+    // Look ahead to find the optional DataLayout, if present
+    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
+    desc.m_DataLayout = OptionalDataLayout(operation, dataLayoutFlagIndex, model, data);
+
+    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
+    unsigned int widthIndex  = dataLayoutIndexed.GetWidthIndex();
+    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
+
+    LayerInputHandle weightsInput = ConvertToLayerInputHandle(operation, 1, model, data);
+    if (!weightsInput.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* biasOperand = GetInputOperand(operation, 2, model);
+    if (!biasOperand)
+    {
+        return Fail("%s: Could not read bias", __func__);
+    }
+
+    LayerInputHandle biasInput = ConvertToLayerInputHandle(operation, 2, model, data); // 1D
+    if (!biasInput.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    biasInput.SanitizeQuantizationScale(weightsInput, input);
+    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
+    armnn::TensorInfo biasInfo    = biasInput.GetTensorInfo();
+
+    ActivationFn activation;
+    if (implicitPadding)
+    {
+        ::android::nn::PaddingScheme paddingScheme;
+        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)
+                || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)
+                || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)
+                || !GetInputActivationFunction(operation, 7, activation, model, data)
+                || !GetOptionalConvolutionDilationParams(operation, 9, desc, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
+        }
+
+        const uint32_t kernelX = weightsInfo.GetShape()[2];
+        const uint32_t kernelY = weightsInfo.GetShape()[1];
+        const uint32_t inputX  = inputInfo.GetShape()[widthIndex];
+        const uint32_t inputY  = inputInfo.GetShape()[heightIndex];
+
+        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
+        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
+    }
+    else if (operation.inputs.size() >= 11)
+    {
+        // explicit padding
+        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)
+                || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)
+                || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)
+                || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)
+                || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)
+                || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)
+                || !GetInputActivationFunction(operation, 10, activation, model, data)
+                || !GetOptionalConvolutionDilationParams(operation, 12, desc, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
+        }
+    }
+    else
+    {
+        return Fail("%s: Unsupported number of operation inputs", __func__);
+    }
+
+    desc.m_BiasEnabled = true;
+    Optional<TensorInfo> biases(biasInfo);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsDepthwiseConvolutionSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   desc,
+                                   weightsInfo,
+                                   biases);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
+
+    if (!startLayer)
+    {
+        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
+    }
+
+    input.Connect(startLayer->GetInputSlot(0));
+
+    // Connect weights and bias inputs
+    weightsInput.Connect(startLayer->GetInputSlot(1));
+    biasInput.Connect(startLayer->GetInputSlot(2));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
+}
+
+bool Converter::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertDequantize()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid input", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
+    if (quantizationDim.has_value() && quantizationDim.value() != 0)
+    {
+        return Fail("%s: Operation has quantization dimension different than 0", __func__);
+    }
+
+    const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has invalid outputs", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsDequantizeSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertDiv()";
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2
+    // and it should be optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsDivisionSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input0.GetTensorInfo(),
+                                   input1.GetTensorInfo(),
+                                   outputInfo);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
+
+    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
+    if (!isReshapeSupported)
+    {
+        return false;
+    }
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
+                                        data, nullptr, validateFunc, activationFunction);
+}
+
+bool Converter::ConvertElementwiseUnary(const Operation& operation,
+                                        const Model& model,
+                                        ConversionData& data,
+                                        UnaryOperation unaryOperation)
+{
+    VLOG(DRIVER) << "Converter::ConvertElementwiseUnary()";
+    VLOG(DRIVER) << "unaryOperation = " << GetUnaryOperationAsCString(unaryOperation);
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid input", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const TensorInfo& inputInfo = input.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    ElementwiseUnaryDescriptor descriptor(unaryOperation);
+
+    bool isSupported = false;
+
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsElementwiseUnarySupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddElementwiseUnaryLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertElu(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertElu()";
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input0.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // Determine data type of input tensor
+    OperandType inputType;
+    if (!GetOperandType(operation, 0, model, inputType))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    ActivationDescriptor desc;
+    desc.m_Function = ActivationFunction::Elu;
+
+    // Read alpha
+    if (inputType == OperandType::TENSOR_FLOAT16)
+    {
+        Half alpha;
+
+        if (!GetInputScalar(operation, 1, OperandType::FLOAT16, alpha, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
+        }
+
+        desc.m_A = static_cast<float>(alpha);
+    }
+    else if (inputType == OperandType::TENSOR_FLOAT32)
+    {
+        if (!GetInputScalar(operation, 1, OperandType::FLOAT32, desc.m_A, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
+        }
+    }
+    else
+    {
+        return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
+    }
+
+    return ::ConvertToActivation(operation, __func__, desc, model, data);
+}
+
+bool Converter::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertExpandDims()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid input", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Operation has invalid output", __func__);
+    }
+
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    int32_t axis;
+    if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
+    {
+        return Fail("%s: failed to get axis input value", __func__);
+    }
+
+    TensorShape targetShape;
+
+    try
+    {
+        targetShape = armnnUtils::ExpandDims(input.GetTensorInfo().GetShape(), axis);
+    }
+    catch (const std::exception& e)
+    {
+        return Fail("%s: %s", __func__, e.what());
+    }
+
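+    // EXPAND_DIMS is lowered to a Reshape whose target shape is the input shape with a 1 inserted
+    // at the requested axis (computed above by armnnUtils::ExpandDims).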
+    ReshapeDescriptor reshapeDescriptor;
+    reshapeDescriptor.m_TargetShape = targetShape;
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsReshapeSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input.GetTensorInfo(),
+                                   outputInfo,
+                                   reshapeDescriptor);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        if (targetShape != outputInfo.GetShape())
+        {
+            return Fail("%s: Shape of the output operand does not match the resolved expanded shape", __func__);
+        }
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertFill(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertFill()";
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    const TensorInfo& inputInfo  = input.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    if (IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    // Determine data type of output tensor
+    OperandType outputType = output->type;
+    FillDescriptor descriptor;
+    // Read the scalar fill value
+    if (outputType == OperandType::TENSOR_FLOAT16)
+    {
+        Half value;
+
+        if (!GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+
+        descriptor.m_Value = static_cast<float>(value);
+    }
+    else if (outputType == OperandType::TENSOR_FLOAT32)
+    {
+        if (!GetInputScalar(operation, 1, OperandType::FLOAT32, descriptor.m_Value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+    }
+    else if (outputType == OperandType::TENSOR_INT32)
+    {
+        int32_t value;
+
+        if (!GetInputScalar(operation, 1, OperandType::INT32, value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+
+        descriptor.m_Value = static_cast<float>(value);
+    }
+    else
+    {
+        return Fail("%s: Unsupported input tensor type: %d", __func__, outputType);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsFillSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               descriptor);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+}
+
+bool Converter::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertFloor()";
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has invalid outputs", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsFloorSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input.GetTensorInfo(),
+                                   outputInfo);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertFullyConnected()";
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    LayerInputHandle weightsInput = LayerInputHandle();
+    const Operand* weightsOperand = GetInputOperand(operation, 1, model);
+    if (!weightsOperand)
+    {
+        return Fail("%s: Could not read weights", __func__);
+    }
+
+    // If the weights are constant, a separate constant layer will be created to store the data.
+    // Otherwise the non-constant weights are handled as an input.
+    weightsInput = ConvertToLayerInputHandle(operation, 1, model, data);
+    if (!weightsInput.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    LayerInputHandle biasInput = LayerInputHandle();
+    const Operand* biasOperand = GetInputOperand(operation, 2, model);
+    if (!biasOperand)
+    {
+        return Fail("%s: Could not read bias", __func__);
+    }
+
+    // If the bias is constant, a separate constant layer will be created to store the data.
+    // Otherwise the non-constant bias is handled as an input.
+    biasInput = ConvertToLayerInputHandle(operation, 2, model, data); // 1D
+    if (!biasInput.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
+    armnn::TensorInfo reshapedInfo = inputInfo;
+    try
+    {
+        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
+    }
+    catch (const std::exception& e)
+    {
+        return Fail("%s: %s", __func__, e.what());
+    }
+
+    // Ensure the bias quantization scale matches input scale * weights scale (small float differences can exist)
+    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
+    SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);
+
+    ActivationFn activationFunction;
+    if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    armnn::FullyConnectedDescriptor desc;
+    desc.m_TransposeWeightMatrix = true;
+    desc.m_BiasEnabled           = true;
+    desc.m_ConstantWeights       = IsOperandConstant(*weightsOperand);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
+                                        weightsInfo.GetShape(),
+                                        outputInfo.GetShape(),
+                                        desc.m_TransposeWeightMatrix))
+        {
+            isSupported = false;
+            Fail("%s: Expected outputShape does not match actual outputShape", __func__);
+            return;
+        }
+
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsFullyConnectedSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   reshapedInfo,
+                                   outputInfo,
+                                   weightsInfo,
+                                   biasInfo,
+                                   desc);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
+    armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
+
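+    // NNAPI FULLY_CONNECTED implicitly flattens inputs with rank > 2 to 2D [batch_size, input_size],
+    // so insert a Reshape in front of the FullyConnected layer using the flattened shape computed above.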
+    if (inputInfo.GetNumDimensions() > 2U)
+    {
+        armnn::ReshapeDescriptor reshapeDescriptor;
+        reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
+
+        armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+        assert(reshapeLayer != nullptr);
+        input.Connect(reshapeLayer->GetInputSlot(0));
+        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
+        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
+    }
+    else
+    {
+        input.Connect(startLayer->GetInputSlot(0));
+    }
+
+    // Connect weights and bias inputs
+    weightsInput.Connect(startLayer->GetInputSlot(1));
+    biasInput.Connect(startLayer->GetInputSlot(2));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
+                                        data, nullptr, validateFunc, activationFunction);
+}
+
+bool Converter::ConvertGather(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertGather()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid input", __func__);
+    }
+    auto inputDimensions = input.GetTensorInfo().GetNumDimensions();
+
+    LayerInputHandle indices = ConvertToLayerInputHandle(operation, 2, model, data);
+    if (!indices.IsValid())
+    {
+        return Fail("%s: Operation has invalid indices", __func__);
+    }
+    auto indicesDimensions = indices.GetTensorInfo().GetNumDimensions();
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Operation has invalid output", __func__);
+    }
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+    auto outputDimensions = outputInfo.GetNumDimensions();
+    if (outputDimensions != inputDimensions + indicesDimensions - 1)
+    {
+        return Fail("%s: Operation has invalid output dimensions: %d. Output must be an (%d + %d - 1)-D tensor",
+                     __func__, outputDimensions, inputDimensions, indicesDimensions);
+    }
+
+    int32_t axis;
+    if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
+    {
+        return Fail("%s: Operation has invalid or unsupported axis operand", __func__);
+    }
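+    // The NNAPI GATHER axis may be negative; it must lie in [-inputDimensions, inputDimensions).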
+    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
+    {
+        return Fail("%s: Operation has invalid axis: %d. It is out of bounds [-%d, %d))", __func__, axis,
+                    inputDimensions, inputDimensions);
+    }
+
+    GatherDescriptor desc;
+    desc.m_Axis = axis;
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsGatherSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input.GetTensorInfo(),
+                                   indices.GetTensorInfo(),
+                                   outputInfo,
+                                   desc);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddGatherLayer(desc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+    indices.Connect(layer->GetInputSlot(1));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertGroupedConv2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertGroupedConv2d()";
+    //
+    // Parse data
+    //
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+    const TensorInfo& inputInfo  = input.GetTensorInfo();
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+    TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+
+    // Look ahead to determine data layout
+    DataLayout dataLayout = DataLayout::NHWC;
+    if (operation.inputs.size() == 12)
+    {
+        dataLayout = OptionalDataLayout(operation, 11, model, data);
+    }
+    else
+    {
+        dataLayout = OptionalDataLayout(operation, 8, model, data);
+    }
+
+    // NOTE:
+    // NNAPI weights are always OHWI, i.e. [depth_out, filter_height, filter_width, depth_group],
+    // but Arm NN expects the filter's height and width indices to match the input's height and
+    // width indices so when the DataLayout is NCHW, we need to permute the weights to OIHW
+    const PermutationVector ohwiToOihw = { 0u, 2u, 3u, 1u };
+    const ConstTensorPin weightsPin = (dataLayout == DataLayout::NCHW) ?
+                                      ConvertOperationInputToConstTensorPin(operation, 1, model, data, ohwiToOihw) :
+                                      ConvertOperationInputToConstTensorPin(operation, 1, model, data);
+    const ConstTensorPin biasesPin  =
+        ConvertOperationInputToConstTensorPin(operation, 2, model, data);
+    if (!weightsPin.IsValid() || !biasesPin.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    ConstTensor weights = weightsPin.GetConstTensor();
+    ConstTensor biases  = biasesPin.GetConstTensor();
+    SanitizeBiasQuantizationScale(biases.GetInfo(), weights.GetInfo(), inputInfo);
+
+    const TensorShape& inputShape   = inputInfo.GetShape();
+    const TensorShape& outputShape  = outputInfo.GetShape();
+    const TensorShape& weightsShape = weights.GetShape();
+    const TensorShape& biasesShape  = biases.GetShape();
+
+    armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);
+    const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
+    const unsigned int heightIndex   = dataLayoutIndexed.GetHeightIndex();
+    const unsigned int widthIndex    = dataLayoutIndexed.GetWidthIndex();
+
+    Convolution2dDescriptor desc;
+    desc.m_DataLayout  = dataLayout;
+    desc.m_BiasEnabled = true;
+
+    int numGroups;
+    ActivationFn activation;
+
+    if (operation.inputs.size() == 12)
+    {
+        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
+            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
+            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
+            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
+            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
+            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
+            !GetInputScalar(operation, 9, OperandType::INT32, numGroups, model, data) ||
+            !GetInputActivationFunction(operation, 10, activation, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
+        }
+
+    }
+    else if (operation.inputs.size() == 9)
+    {
+        ::android::nn::PaddingScheme paddingScheme;
+        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
+            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
+            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
+            !GetInputScalar(operation, 6, OperandType::INT32, numGroups, model, data) ||
+            !GetInputActivationFunction(operation, 7, activation, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
+        }
+
+        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
+        const uint32_t inputY = inputInfo.GetShape()[heightIndex];
+
+        const uint32_t kernelX = weightsShape[widthIndex];
+        const uint32_t kernelY = weightsShape[heightIndex];
+
+        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
+        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
+    }
+    else
+    {
+        return Fail("%s: Unsupported number of operation inputs", __func__);
+    }
+
+    // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
+    const unsigned int outputChannels = weightsShape[0];
+
+    const unsigned int channelsPerGroup  = weightsShape[channelsIndex];
+    const unsigned int channelMultiplier = outputChannels / numGroups;
+
+    //
+    // Validate all relevant inputs
+    //
+    if (numGroups <= 0)
+    {
+        return Fail("%s: Number of groups must be greater than 0. Got: %d", __func__, numGroups);
+    }
+
+    if (outputChannels % numGroups != 0u)
+    {
+        return Fail("%s: Output channels must be divisible by the number of groups", __func__);
+    }
+
+    //
+    // Set up Splitter layer
+    //
+    unsigned int splitterDimSizes[4] = { inputShape[0], inputShape[1], inputShape[2], inputShape[3] };
+    splitterDimSizes[channelsIndex] /= numGroups; // split in depth
+
+    TensorInfo splitterOutputInfo(4,
+                                  splitterDimSizes,
+                                  inputInfo.GetDataType(),
+                                  inputInfo.GetQuantizationScale(),
+                                  inputInfo.GetQuantizationOffset());
+
+    std::vector<std::reference_wrapper<TensorInfo>> splitterOutputInfos(numGroups, std::ref(splitterOutputInfo));
+
+    ViewsDescriptor splitterDesc(numGroups);
+    for (unsigned int group = 0u; group < numGroups; ++group)
+    {
+        splitterDesc.SetViewOriginCoord(group, channelsIndex, splitterDimSizes[channelsIndex] * group);
+        for (unsigned int dimIdx = 0u; dimIdx < 4u; dimIdx++)
+        {
+            splitterDesc.SetViewSize(group, dimIdx, splitterDimSizes[dimIdx]);
+        }
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsSplitterSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               splitterOutputInfos,
+                               splitterDesc);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* splitterLayer = data.m_Network->AddSplitterLayer(splitterDesc);
+    if (!splitterLayer)
+    {
+        return Fail("%s: Failed to add SplitterLayer", __func__);
+    }
+
+    input.Connect(splitterLayer->GetInputSlot(0));
+    for (unsigned int group = 0u; group < splitterLayer->GetNumOutputSlots(); ++group)
+    {
+        splitterLayer->GetOutputSlot(group).SetTensorInfo(splitterOutputInfo);
+    }
+
+    //
+    // Set up Convolution2d layers for each group
+    //
+
+    // Set up group tensor shapes
+    TensorShape groupInputShape(inputShape);
+    groupInputShape[channelsIndex] = channelsPerGroup;
+
+    TensorShape groupWeightsShape(weightsShape);
+    groupWeightsShape[0] /= channelMultiplier * numGroups;
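+    // With groupWeightsShape[0] reduced to outputChannels / (channelMultiplier * numGroups) = 1,
+    // each Convolution2d created below produces a single output channel; the loops create
+    // numGroups * channelMultiplier convolution layers in total.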
+
+    TensorShape groupBiasesShape({ 1 });
+
+    // Set up group tensor infos
+    TensorInfo groupInputInfo(inputInfo);
+    groupInputInfo.SetShape(groupInputShape);
+
+    const TensorInfo& weightsInfo = weights.GetInfo();
+    TensorInfo groupWeightsInfo(weightsInfo);
+    groupWeightsInfo.SetShape(groupWeightsShape);
+
+    const TensorInfo& biasesInfo = biases.GetInfo();
+    TensorInfo groupBiasesInfo(biasesInfo);
+    groupBiasesInfo.SetShape(groupBiasesShape);
+
+    TensorInfo groupOutputInfo(outputInfo);
+
+    TensorShape groupOutputShape(outputShape);
+    const bool isDynamic = IsDynamicTensor(outputInfo);
+    if (!isDynamic)
+    {
+        groupOutputShape[channelsIndex] = 1;
+    }
+    groupOutputInfo.SetShape(groupOutputShape);
+
+    const unsigned int weightsDataTypeSize = GetDataTypeSize(groupWeightsInfo.GetDataType());
+    const unsigned int biasesDataTypeSize  = GetDataTypeSize(groupBiasesInfo.GetDataType());
+
+    std::vector<IConnectableLayer*> convLayers(numGroups * channelMultiplier, nullptr);
+    for (unsigned int group = 0u; group < numGroups; ++group)
+    {
+        for (unsigned int m = 0u; m < channelMultiplier; ++m)
+        {
+            auto index = group * channelMultiplier + m;
+
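+            // Weights and biases for each (group, channel multiplier) pair occupy contiguous slices
+            // of the original constant tensors; compute the byte offsets of this layer's slice.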
+            const unsigned int weightsDataOffset = groupWeightsShape.GetNumElements() * index * weightsDataTypeSize;
+            const unsigned int biasesDataOffset = groupBiasesShape.GetNumElements() * index * biasesDataTypeSize;
+
+            if (weightsInfo.HasPerAxisQuantization())
+            {
+                // Extract per-axis quantization scales for group weights
+                const std::vector<float>& weightsQuantScales = weightsInfo.GetQuantizationScales();
+                groupWeightsInfo.SetQuantizationScales(
+                    std::vector<float>(weightsQuantScales.begin() + index,
+                                       weightsQuantScales.begin() + index + groupWeightsShape[0]));
+
+                // Extract per-axis quantization scales for group biases
+                const std::vector<float>& biasesQuantScales  = biasesInfo.GetQuantizationScales();
+                groupBiasesInfo.SetQuantizationScales(
+                    std::vector<float>(biasesQuantScales.begin() + index,
+                                       biasesQuantScales.begin() + index + groupWeightsShape[0]));
+            }
+
+            // Extract weights and biases data for current group convolution
+            ConstTensor groupWeights(groupWeightsInfo,
+                                     static_cast<const void *>(reinterpret_cast<const char *>(weights.GetMemoryArea()) +
+                                                               weightsDataOffset));
+            ConstTensor groupBiases(groupBiasesInfo,
+                                    static_cast<const void *>(reinterpret_cast<const char *>(biases.GetMemoryArea()) +
+                                                              biasesDataOffset));
+
+            isSupported = false;
+            auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+            {
+                FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                           IsConvolution2dSupported,
+                                           data.m_Backends,
+                                           isSupported,
+                                           groupInputInfo,
+                                           outputInfo,
+                                           desc,
+                                           groupWeightsInfo,
+                                           Optional<TensorInfo>(groupBiasesInfo));
+            };
+
+            if(!isDynamic)
+            {
+                validateFunc(groupOutputInfo, isSupported);
+            }
+            else
+            {
+                isSupported = AreDynamicTensorsSupported();
+            }
+
+            if (!isSupported)
+            {
+                return false;
+            }
+            ARMNN_NO_DEPRECATE_WARN_BEGIN
+            IConnectableLayer* convLayer =
+                data.m_Network->AddConvolution2dLayer(desc, groupWeights, Optional<ConstTensor>(groupBiases));
+            ARMNN_NO_DEPRECATE_WARN_END
+            if (!convLayer)
+            {
+                return Fail("%s: AddConvolution2dLayer failed", __func__);
+            }
+
+            splitterLayer->GetOutputSlot(group).Connect(convLayer->GetInputSlot(0));
+            convLayer->GetOutputSlot(0).SetTensorInfo(groupOutputInfo);
+
+            if(isDynamic)
+            {
+                convLayer->GetOutputSlot(0).IsTensorInfoSet();
+
+                validateFunc(convLayer->GetOutputSlot(0).GetTensorInfo(), isSupported);
+
+                outputInfo = convLayer->GetOutputSlot(0).GetTensorInfo();
+
+                if (!isSupported)
+                {
+                    return false;
+                }
+            }
+
+            convLayers[index] = convLayer;
+        }
+    }
+
+    //
+    // Set up Concat layer
+    //
+    ConcatDescriptor concatDescriptor;
+    // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
+    concatDescriptor = ConcatDescriptor(weightsShape[0]);
+    for (unsigned int group = 0u; group < numGroups; ++group)
+    {
+        for (unsigned int m = 0u; m < channelMultiplier; ++m)
+        {
+            auto index = group * channelMultiplier + m;
+            concatDescriptor.SetViewOriginCoord(index, channelsIndex, index);
+            concatDescriptor.SetConcatAxis(channelsIndex);
+        }
+    }
+
+    isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsConcatSupported,
+                               data.m_Backends,
+                               isSupported,
+                               std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
+                               outputInfo,
+                               concatDescriptor);
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* concatLayer = data.m_Network->AddConcatLayer(concatDescriptor);
+    if (!concatLayer)
+    {
+        return Fail("%s: AddConcatLayer failed", __func__);
+    }
+
+    for (unsigned int group = 0u; group < numGroups; ++group)
+    {
+        for (unsigned int m = 0u; m < channelMultiplier; ++m)
+        {
+            auto index = group * channelMultiplier + m;
+            convLayers[index]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(index));
+        }
+    }
+    concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *concatLayer, model,
+                                        data, nullptr, nullptr, activation);
+}
+
+bool Converter::ConvertHardSwish(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertHardSwish()";
+    ActivationDescriptor desc;
+    desc.m_Function = ActivationFunction::HardSwish;
+
+    return ::ConvertToActivation(operation, __func__, desc, model, data);
+}
+
+bool Converter::ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertInstanceNormalization()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has an invalid input 0", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Operation has an invalid output", __func__);
+    }
+
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    // Determine data type of input tensor
+    OperandType inputType;
+    if (!GetOperandType(operation, 0, model, inputType))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    InstanceNormalizationDescriptor desc;
+
+    // Read gamma, beta & epsilon
+    if (inputType == OperandType::TENSOR_FLOAT16)
+    {
+        Half fp16Gamma;
+        Half fp16Beta;
+        Half fp16Epsilon;
+
+        if (!GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Gamma, model, data) ||
+            !GetInputScalar(operation, 2, OperandType::FLOAT16, fp16Beta, model, data) ||
+            !GetInputScalar(operation, 3, OperandType::FLOAT16, fp16Epsilon, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
+        }
+
+        desc.m_Gamma = static_cast<float>(fp16Gamma);
+        desc.m_Beta  = static_cast<float>(fp16Beta);
+        desc.m_Eps   = static_cast<float>(fp16Epsilon);
+    }
+    else if (inputType == OperandType::TENSOR_FLOAT32)
+    {
+        if (!GetInputScalar(operation, 1, OperandType::FLOAT32, desc.m_Gamma, model, data) ||
+            !GetInputScalar(operation, 2, OperandType::FLOAT32, desc.m_Beta, model, data) ||
+            !GetInputScalar(operation, 3, OperandType::FLOAT32, desc.m_Eps, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
+        }
+    }
+    else
+    {
+        return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
+    }
+
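+    // Input 4 is the optional data layout flag; OptionalDataLayout maps it to NCHW when set,
+    // otherwise NHWC is assumed.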
+    desc.m_DataLayout = OptionalDataLayout(operation, 4, model, data);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsInstanceNormalizationSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input.GetTensorInfo(),
+                                   outputInfo,
+                                   desc);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddInstanceNormalizationLayer(desc);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertL2Normalization()";
+
+    if (operation.inputs.size() != 1)
+    {
+        return Fail("%s: Optional inputs are not supported", __func__);
+    }
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    if (outputInfo.GetNumDimensions() != 4u)
+    {
+        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
+    }
+
+    armnn::L2NormalizationDescriptor desc;
+    desc.m_DataLayout = armnn::DataLayout::NHWC;
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsL2NormalizationSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   desc);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertL2Pool2d()";
+    return ConvertPooling2d(operation, __func__, PoolingAlgorithm::L2, model, data);
+}
+
+bool Converter::ConvertLocalResponseNormalization(const Operation& operation,
+                                                  const Model& model,
+                                                  ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertLocalResponseNormalization()";
+
+    if (operation.inputs.size() != 5)
+    {
+        return Fail("%s: Optional inputs are not supported", __func__);
+    }
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    if (outputInfo.GetNumDimensions() != 4u)
+    {
+        return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
+    }
+
+    armnn::NormalizationDescriptor descriptor;
+    descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
+    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
+    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+
+    if (!input.IsValid() ||
+        !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
+        !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
+        !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
+        !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // ArmNN expects normSize to be the full size of the normalization
+    // window rather than the radius as in AndroidNN.
+    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsNormalizationSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertLogicalBinary(const Operation& operation,
+                                     const Model& model,
+                                     ConversionData& data,
+                                     armnn::LogicalBinaryOperation logicalOperation)
+{
+    VLOG(DRIVER) << "Converter::ConvertLogicalBinary()";
+    VLOG(DRIVER) << "ConvertLogicalBinary()";
+    VLOG(DRIVER) << "logicalOperation = " << GetLogicalBinaryOperationAsCString(logicalOperation);
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!(input0.IsValid() && input1.IsValid()))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const TensorInfo& inputInfo0 = input0.GetTensorInfo();
+    const TensorInfo& inputInfo1 = input1.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    LogicalBinaryDescriptor descriptor(logicalOperation);
+
+    bool isSupported = false;
+
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsLogicalBinarySupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo0,
+                                   inputInfo1,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor);
+    assert(layer != nullptr);
+
+    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
+    if (!isReshapeSupported)
+    {
+        return false;
+    }
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertLogistic()";
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::Sigmoid;
+
+    return ConvertToActivation(operation, __func__, desc, model, data);
+}
+
+bool Converter::ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertLogSoftmax()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Failed to read input 0", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Failed to read output", __func__);
+    }
+
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    // Determine data type of input tensor
+    OperandType inputType;
+    if (!GetOperandType(operation, 0, model, inputType))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    LogSoftmaxDescriptor descriptor;
+
+    // Read beta
+    if (inputType == OperandType::TENSOR_FLOAT16)
+    {
+        Half fp16Beta;
+        if (!GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Beta, model, data))
+        {
+            return Fail("%s: Failed to read input 1 (FLOAT16)", __func__);
+        }
+
+        descriptor.m_Beta  = static_cast<float>(fp16Beta);
+    }
+    else if (inputType == OperandType::TENSOR_FLOAT32)
+    {
+        if (!GetInputScalar(operation, 1, OperandType::FLOAT32, descriptor.m_Beta, model, data))
+        {
+            return Fail("%s: Failed to read input 1 (FLOAT32)", __func__);
+        }
+    }
+    else
+    {
+        return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
+    }
+
+    // Read axis
+    if (!GetInputInt32(operation, 2, descriptor.m_Axis, model, data))
+    {
+        return Fail("%s: Failed to read input 2", __func__);
+    }
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsLogSoftmaxSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input.GetTensorInfo(),
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddLogSoftmaxLayer(descriptor);
+    if (!layer)
+    {
+        return Fail("%s: AddLogSoftmaxLayer() returned nullptr", __func__);
+    }
+
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertLstm()";
+
+    // Inputs:
+    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
+    //      “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0: input", __func__);
+    }
+    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
+    LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
+    if (!outputStateIn.IsValid())
+    {
+        return Fail("%s: Could not read input 18: outputStateIn", __func__);
+    }
+    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
+    LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
+    if (!cellStateIn.IsValid())
+    {
+        return Fail("%s: Could not read input 19: cellStateIn", __func__);
+    }
+
+    // Get the mandatory input tensors:
+    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, input_size].
+    const ConstTensorPin inputToForgetWeightsPin =
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 2));
+    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    // [num_units, input_size].
+    const ConstTensorPin inputToCellWeightsPin =
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 3));
+    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, input_size].
+    const ConstTensorPin inputToOutputWeightsPin =
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 4));
+    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, output_size].
+    const ConstTensorPin recurrentToForgetWeightsPin =
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 6));
+    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, output_size].
+    const ConstTensorPin recurrentToCellWeightsPin =
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 7));
+    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, output_size].
+    const ConstTensorPin recurrentToOutputWeightsPin =
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 8));
+    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin forgetGateBiasPin =
+        ConvertOperationInputToConstTensorPin(operation, 13, model, data);
+    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin cellBiasPin =
+        ConvertOperationInputToConstTensorPin(operation, 14, model, data);
+    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin outputGateBiasPin =
+        ConvertOperationInputToConstTensorPin(operation, 15, model, data);
+
+    if (!inputToForgetWeightsPin.IsValid() ||
+        !inputToCellWeightsPin.IsValid() ||
+        !inputToOutputWeightsPin.IsValid() ||
+        !recurrentToForgetWeightsPin.IsValid() ||
+        !recurrentToCellWeightsPin.IsValid() ||
+        !recurrentToOutputWeightsPin.IsValid() ||
+        !forgetGateBiasPin.IsValid() ||
+        !cellBiasPin.IsValid() ||
+        !outputGateBiasPin.IsValid())
+    {
+        return Fail("%s: Operation has invalid tensor inputs", __func__);
+    }
+
+    // Get the optional input tensors:
+    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
+    const ConstTensorPin inputToInputWeightsPin =
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 1, true));
+    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
+    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
+    const ConstTensorPin recurrentToInputWeightsPin =
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 5, true));
+    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin cellToInputWeightsPin =
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 9, true));
+    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin cellToForgetWeightsPin =
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 10, true));
+    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin cellToOutputWeightsPin =
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 11, true));
+    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
+    const ConstTensorPin inputGateBiasPin =
+        ConvertOperationInputToConstTensorPin(operation, 12, model, data, g_DontPermute, nullptr, true);
+
+    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
+    //     [output_size, num_units].
+    const ConstTensorPin projectionWeightsPin =
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 16, true));
+    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
+    const ConstTensorPin projectionBiasPin =
+        ConvertOperationInputToConstTensorPin(operation, 17, model, data, g_DontPermute, nullptr, true);
+
+    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
+        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
+        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
+        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
+        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
+        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
+        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
+        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
+    {
+        return Fail("%s: Operation has invalid tensor inputs", __func__);
+    }
+
+    // Get the mandatory input scalars (actually 1-D tensors of size 1):
+    // 20: The activation function: A value indicating the activation function:
+    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
+    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
+    //     If set to 0.0 then clipping is disabled.
+    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
+    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
+    ActivationFn activation = ActivationFn::kActivationNone;
+    float cellClip;
+    float projClip;
+    if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
+        !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
+        !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
+    {
+        return Fail("%s: Operation has invalid scalar inputs", __func__);
+    }
+
+    // Get the normalization tensors
+    // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
+    //     Used to rescale normalized inputs to activation at input gate.
+    const ConstTensorPin inputLayerNormWeightsPin
+        (DequantizeAndMakeConstTensorPin(operation, model, data, 23, true));
+
+    // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
+    //     Used to rescale normalized inputs to activation at forget gate.
+    const ConstTensorPin forgetLayerNormWeightsPin =
+        ConvertOperationInputToConstTensorPin(operation, 24, model, data, g_DontPermute, nullptr, true);
+
+    // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
+    //     Used to rescale normalized inputs to activation at cell gate.
+    const ConstTensorPin cellLayerNormWeightsPin =
+        ConvertOperationInputToConstTensorPin(operation, 25, model, data, g_DontPermute, nullptr, true);
+
+    // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
+    //     Used to rescale normalized inputs to activation at output gate.
+    const ConstTensorPin outputLayerNormWeightsPin =
+        ConvertOperationInputToConstTensorPin(operation, 26, model, data, g_DontPermute, nullptr, true);
+
+    // Outputs:
+    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
+    // with CIFG, or [batch_size, num_units * 3] without CIFG.
+    const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
+    if (!scratchBuffer)
+    {
+        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
+    }
+    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
+    const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
+    if (!outputStateOut)
+    {
+        return Fail("%s: Could not read output 1: outputStateOut", __func__);
+    }
+    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
+    const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
+    if (!cellStateOut)
+    {
+        return Fail("%s: Could not read output 2: cellStateOut", __func__);
+    }
+    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
+    //     effectively the same as the current “output state (out)” value.
+    const Operand* output = GetOutputOperand(operation, 3, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 3: output", __func__);
+    }
+
+    // set the params structure for the AddLstmLayer call
+    LstmInputParams params;
+    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
+    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
+    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
+    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
+    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
+    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
+    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
+    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
+    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
+    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
+    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
+    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
+    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
+    params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
+    params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
+    params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
+    params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
+
+    // set the layer descriptor
+    LstmDescriptor desc;
+    desc.m_ActivationFunc = activation;
+    desc.m_ClippingThresCell = cellClip;
+    desc.m_ClippingThresProj = projClip;
+    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
+                          params.m_RecurrentToInputWeights == nullptr ||
+                          params.m_InputGateBias == nullptr);
+    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
+                              params.m_CellToOutputWeights != nullptr);
+    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
+    desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
+                               params.m_ForgetLayerNormWeights != nullptr ||
+                               params.m_CellLayerNormWeights != nullptr ||
+                               params.m_OutputLayerNormWeights != nullptr);
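+    // Note: the optional-feature flags above are inferred purely from which optional tensors
+    // the model supplies: CIFG when any of the input-gate tensors is absent, and peephole,
+    // projection and layer normalization as soon as any of their tensors is present. The
+    // checks below then reject models that supply only part of an optional group.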
+
+    // validate the optional input groups
+    if (desc.m_CifgEnabled &&
+        (params.m_InputToInputWeights != nullptr ||
+         params.m_RecurrentToInputWeights != nullptr ||
+         params.m_InputGateBias != nullptr))
+    {
+        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
+                    " and input gate bias must be provided", __func__);
+    }
+
+    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
+    {
+        return Fail("%s: projection bias should not be provided without projection weights", __func__);
+    }
+
+    if (desc.m_PeepholeEnabled &&
+        (params.m_CellToForgetWeights == nullptr ||
+         params.m_CellToOutputWeights == nullptr ||
+         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
+    {
+        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
+                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
+    }
+
+    if (desc.m_LayerNormEnabled &&
+        (params.m_ForgetLayerNormWeights == nullptr ||
+         params.m_CellLayerNormWeights == nullptr ||
+         params.m_OutputLayerNormWeights == nullptr ||
+         (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
+    {
+        return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
+                    " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
+    }
+
+    // Check if the layer is supported
+    // Inputs
+    const TensorInfo& inputInfo         = input.GetTensorInfo();
+    const TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
+    const TensorInfo& cellStateInInfo   = cellStateIn.GetTensorInfo();
+
+    // Outputs
+    const TensorInfo& scratchBufferInfo  = GetTensorInfoForOperand(*scratchBuffer);
+    const TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
+    const TensorInfo& cellStateOutInfo   = GetTensorInfoForOperand(*cellStateOut);
+    const TensorInfo& outputInfo         = GetTensorInfoForOperand(*output);
+
+    // Basic parameters
+    LstmInputParamsInfo paramsInfo;
+    paramsInfo.m_InputToForgetWeights     = &(params.m_InputToForgetWeights->GetInfo());
+    paramsInfo.m_InputToCellWeights       = &(params.m_InputToCellWeights->GetInfo());
+    paramsInfo.m_InputToOutputWeights     = &(params.m_InputToOutputWeights->GetInfo());
+    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
+    paramsInfo.m_RecurrentToCellWeights   = &(params.m_RecurrentToCellWeights->GetInfo());
+    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
+    paramsInfo.m_ForgetGateBias           = &(params.m_ForgetGateBias->GetInfo());
+    paramsInfo.m_CellBias                 = &(params.m_CellBias->GetInfo());
+    paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());
+
+    // Optional parameters
+    if (!desc.m_CifgEnabled)
+    {
+        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
+        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
+        if (params.m_CellToInputWeights != nullptr)
+        {
+            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
+        }
+        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
+    }
+
+    if (desc.m_ProjectionEnabled)
+    {
+        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
+        if (params.m_ProjectionBias != nullptr)
+        {
+            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
+        }
+    }
+
+    if (desc.m_PeepholeEnabled)
+    {
+        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
+        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
+    }
+
+    if (desc.m_LayerNormEnabled)
+    {
+        if(!desc.m_CifgEnabled)
+        {
+            paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
+        }
+        paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
+        paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
+        paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
+    }
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsLstmSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputStateInInfo,
+                                   cellStateInInfo,
+                                   scratchBufferInfo,
+                                   outputStateOutInfo,
+                                   cellStateOutInfo,
+                                   outputInfo,
+                                   desc,
+                                   paramsInfo);
+    };
+
+    bool isDynamic = false;
+    if (!IsDynamicTensor(outputStateOutInfo) &&
+        !IsDynamicTensor(scratchBufferInfo)  &&
+        !IsDynamicTensor(cellStateOutInfo)   &&
+        !IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isDynamic = true;
+        isSupported = AreDynamicTensorsSupported();
+    }
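+    // When any of the outputs is dynamic, backend validation is deferred: validateFunc is
+    // passed to SetupAndTrackLayerOutputSlot for the final output below so that support can
+    // be re-checked once the output shape is known.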
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    // Add the layer
+    IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
+
+    input.Connect(layer->GetInputSlot(0));
+    outputStateIn.Connect(layer->GetInputSlot(1));
+    cellStateIn.Connect(layer->GetInputSlot(2));
+
+    if (!isDynamic)
+    {
+        return (
+             SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
+             SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
+             SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
+             SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
+    }
+    else
+    {
+        return (
+             SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
+             SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
+             SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
+             SetupAndTrackLayerOutputSlot(
+                 operation, 3, *layer, 3, model, data, nullptr, validateFunc, ActivationFn::kActivationNone, true));
+    }
+
+}
+
+bool Converter::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertMaxPool2d()";
+    return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Max, model, data);
+}
+
+bool Converter::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertMaximum()";
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    const TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsMaximumSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input0.GetTensorInfo(),
+                                   input1.GetTensorInfo(),
+                                   outInfo);
+    };
+
+    if(IsDynamicTensor(outInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
+    assert(layer != nullptr);
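+    // BroadcastTensor connects both inputs to the layer and, if the input ranks differ,
+    // inserts a Reshape layer in front of the lower-rank input so the shapes can broadcast.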
+    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
+    if (!isReshapeSupported)
+    {
+        return false;
+    }
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertMean()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    const Operand* axisOperand = GetInputOperand(operation, 1, model);
+    if (!axisOperand)
+    {
+        return Fail("%s: Could not read input 1", __func__);
+    }
+
+    std::vector<int32_t> axis;
+    if (!GetTensorInt32Values(*axisOperand, axis, model, data))
+    {
+        return Fail("%s: Input 1 has invalid values", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+
+    // Convert the axis to unsigned int and remove duplicates.
+    unsigned int rank = inputInfo.GetNumDimensions();
+    std::set<unsigned int> uniqueAxis;
+    std::transform(axis.begin(), axis.end(),
+                   std::inserter(uniqueAxis, uniqueAxis.begin()),
+                   [rank](int i) -> unsigned int { return (i + rank) % rank; });
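+    // (i + rank) % rank maps negative axis values to their positive equivalents, e.g. for a
+    // rank-4 input an axis of -1 becomes 3; inserting into a std::set also drops duplicates.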
+
+    // Get the "keep dims" flag.
+    int32_t keepDims = 0;
+    if (!GetInputInt32(operation, 2, keepDims, model, data))
+    {
+        return Fail("%s: Could not read input 2", __func__);
+    }
+
+    armnn::MeanDescriptor descriptor;
+    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
+    descriptor.m_KeepDims = keepDims > 0;
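+    // When keepDims is set the reduced axes are retained with size 1 instead of being squeezed out.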
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsMeanSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertMinimum()";
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsMinimumSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input0.GetTensorInfo(),
+                                   input1.GetTensorInfo(),
+                                   outputInfo);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
+    assert(layer != nullptr);
+    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
+    if (!isReshapeSupported)
+    {
+        return false;
+    }
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertMul()";
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always at input index 2
+    // and is optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+
+    if (outputOperand == nullptr)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsMultiplicationSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input0.GetTensorInfo(),
+                                   input1.GetTensorInfo(),
+                                   outputInfo);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
+
+    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
+    if (!isReshapeSupported)
+    {
+        return false;
+    }
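+    // The optional fused activation (input 2) is handled by SetupAndTrackLayerOutputSlot,
+    // which appends the matching activation layer after the multiplication when one is specified.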
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
+                                        data, nullptr, validateFunc, activationFunction);
+}
+
+bool Converter::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertPad()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+
+    armnn::PadDescriptor descriptor;
+    if (!ConvertPaddings(operation, model, data, rank, descriptor))
+    {
+        return Fail("%s: Could not convert paddings", __func__);
+    }
+
+    // For ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensors,
+    // the scale and zeroPoint must be the same as input0
+    // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
+    // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
+    // (QuantizationOffset - QuantizationOffset) * scale = 0.
+    if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
+    {
+        descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsPadSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertPadV2()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    const TensorInfo& inputInfo  = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+
+    PadDescriptor descriptor;
+    if (!ConvertPaddings(operation, model, data, rank, descriptor))
+    {
+        return Fail("%s: Could not convert paddings", __func__);
+    }
+
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    // Determine type of padding value
+    OperandType operandType0;
+    OperandType operandType2;
+
+    if (!GetOperandType(operation, 0, model, operandType0) ||
+        !GetOperandType(operation, 2, model, operandType2))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // Read value to use for padding
+    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
+    {
+        Half f16PadValue;
+        if (!GetInputScalar(operation, 2, operandType2, f16PadValue, model, data))
+        {
+            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
+        }
+
+        descriptor.m_PadValue = f16PadValue;
+    }
+    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
+    {
+        if (!GetInputFloat32(operation, 2, descriptor.m_PadValue, model, data))
+        {
+            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
+        }
+    }
+    else if (isQuantizedOperand(operandType0) && operandType2 == OperandType::INT32)
+    {
+        int32_t intPadValue = 0;
+        if (!GetInputInt32(operation, 2, intPadValue, model, data))
+        {
+            return Fail("%s: Could not read input 2 (INT32)", __func__);
+        }
+        descriptor.m_PadValue = intPadValue;
+    }
+    else
+    {
+        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
+    }
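+    // In short, the pad value operand (input 2) must match the input tensor: FLOAT16 for
+    // TENSOR_FLOAT16 inputs, FLOAT32 for TENSOR_FLOAT32, and INT32 for the quantized types.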
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsPadSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertPrelu()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle alpha = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!input.IsValid() || !alpha.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+
+    if (!output)
+    {
+        return Fail("%s: Could not read output", __func__);
+    }
+
+    const TensorInfo& inputInfo  = input.GetTensorInfo();
+    const TensorInfo& alphaInfo  = alpha.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsPreluSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   alphaInfo,
+                                   outputInfo);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
+
+    if (!layer)
+    {
+        return Fail("%s: AddPreluLayer failed", __func__);
+    }
+
+    bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
+    if (!isReshapeSupported)
+    {
+        return false;
+    }
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertQuantize()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid input", __func__);
+    }
+
+    const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has invalid outputs", __func__);
+    }
+
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsQuantizeSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input.GetTensorInfo(),
+                                   outputInfo);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertQuantizedLstm()";
+
+    VLOG(DRIVER) << "ConvertQuantizedLstm()";
+
+    // Inputs:
+    // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED and shape
+    //    [batch_size, input_size], specifying the input to the LSTM cell.
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0: input", __func__);
+    }
+
+    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape
+    //     [batch_size, output_size].
+    LayerInputHandle outputStatePrevTimeStep = ConvertToLayerInputHandle(operation, 18, model, data);
+    if (!outputStatePrevTimeStep.IsValid())
+    {
+        return Fail("%s: Could not read input 18: outputStatePrevTimeStep", __func__);
+    }
+
+    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
+    LayerInputHandle cellStatePrevTimeStep = ConvertToLayerInputHandle(operation, 19, model, data);
+    if (!cellStatePrevTimeStep.IsValid())
+    {
+        return Fail("%s: Could not read input 19: cellStatePrevTimeStep", __func__);
+    }
+
+    // Get the mandatory input tensors:
+
+    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
+    //     [num_units, input_size].
+    const ConstTensorPin inputToForgetWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation, 2, model, data);
+
+    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
+    // [num_units, input_size].
+    const ConstTensorPin inputToCellWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation, 3, model, data);
+
+    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
+    //     [num_units, input_size].
+    const ConstTensorPin inputToOutputWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation, 4, model, data);
+
+    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
+    //     [num_units, output_size].
+    const ConstTensorPin recurrentToForgetWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation, 6, model, data);
+
+    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
+    //     [num_units, output_size].
+    const ConstTensorPin recurrentToCellWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation, 7, model, data);
+
+    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
+    //     [num_units, output_size].
+    const ConstTensorPin recurrentToOutputWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation, 8, model, data);
+
+    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
+    const ConstTensorPin forgetGateBiasPin =
+            ConvertOperationInputToConstTensorPin(operation, 13, model, data);
+
+    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
+    const ConstTensorPin cellBiasPin =
+            ConvertOperationInputToConstTensorPin(operation, 14, model, data);
+
+    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
+    const ConstTensorPin outputGateBiasPin =
+            ConvertOperationInputToConstTensorPin(operation, 15, model, data);
+
+    if (!inputToForgetWeightsPin.IsValid() ||
+        !inputToCellWeightsPin.IsValid() ||
+        !inputToOutputWeightsPin.IsValid() ||
+        !recurrentToForgetWeightsPin.IsValid() ||
+        !recurrentToCellWeightsPin.IsValid() ||
+        !recurrentToOutputWeightsPin.IsValid() ||
+        !forgetGateBiasPin.IsValid() ||
+        !cellBiasPin.IsValid() ||
+        !outputGateBiasPin.IsValid())
+    {
+        return Fail("%s: Operation has invalid tensor inputs", __func__);
+    }
+
+    // Get the optional input tensors:
+
+    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
+    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
+    const ConstTensorPin inputToInputWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation,
+                                                  1,
+                                                  model,
+                                                  data,
+                                                  g_DontPermute,
+                                                  nullptr,
+                                                  true);
+
+    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
+    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
+    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
+    const ConstTensorPin recurrentToInputWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation,
+                                                  5,
+                                                  model,
+                                                  data,
+                                                  g_DontPermute,
+                                                  nullptr,
+                                                  true);
+
+    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
+    // [num_units].
+    const ConstTensorPin cellToInputWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation,
+                                                  9,
+                                                  model,
+                                                  data,
+                                                  g_DontPermute,
+                                                  nullptr,
+                                                  true);
+
+    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
+    // [num_units].
+    const ConstTensorPin cellToForgetWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation,
+                                                  10,
+                                                  model,
+                                                  data,
+                                                  g_DontPermute,
+                                                  nullptr,
+                                                  true);
+
+    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
+    // [num_units].
+    const ConstTensorPin cellToOutputWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation,
+                                                  11,
+                                                  model,
+                                                  data,
+                                                  g_DontPermute,
+                                                  nullptr,
+                                                  true);
+
+    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
+    const ConstTensorPin inputGateBiasPin =
+            ConvertOperationInputToConstTensorPin(operation,
+                                                  12,
+                                                  model,
+                                                  data,
+                                                  g_DontPermute,
+                                                  nullptr,
+                                                  true);
+
+    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
+    //     [output_size, num_units].
+    const ConstTensorPin projectionWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation,
+                                                  16,
+                                                  model,
+                                                  data,
+                                                  g_DontPermute,
+                                                  nullptr,
+                                                  true);
+
+    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [output_size].
+    const ConstTensorPin projectionBiasPin =
+            ConvertOperationInputToConstTensorPin(operation,
+                                                  17,
+                                                  model,
+                                                  data,
+                                                  g_DontPermute,
+                                                  nullptr,
+                                                  true);
+
+    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional())
+        || (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional())
+        || (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional())
+        || (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional())
+        || (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional())
+        || (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional())
+        || (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional())
+        || (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
+    {
+        return Fail("%s: Operation has invalid tensor inputs", __func__);
+    }
+
+    // Get the optional normalization tensors
+
+    // 20: The input layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM.
+    //     Used to rescale normalized inputs to activation at input gate.
+    const ConstTensorPin inputLayerNormWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation,
+                                                  20,
+                                                  model,
+                                                  data,
+                                                  g_DontPermute,
+                                                  nullptr,
+                                                  true);
+
+    // 21: The forget layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM
+    //     Used to rescale normalized inputs to activation at forget gate.
+    const ConstTensorPin forgetLayerNormWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation,
+                                                  21,
+                                                  model,
+                                                  data,
+                                                  g_DontPermute,
+                                                  nullptr,
+                                                  true);
+
+    // 22: The cell layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM.
+    //     Used to rescale normalized inputs to activation at cell gate.
+    const ConstTensorPin cellLayerNormWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation,
+                                                  22,
+                                                  model,
+                                                  data,
+                                                  g_DontPermute,
+                                                  nullptr,
+                                                  true);
+
+    // 23: The output layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM.
+    //     Used to rescale normalized inputs to activation at output gate.
+    const ConstTensorPin outputLayerNormWeightsPin =
+            ConvertOperationInputToConstTensorPin(operation,
+                                                  23,
+                                                  model,
+                                                  data,
+                                                  g_DontPermute,
+                                                  nullptr,
+                                                  true);
+
+    if ((!inputLayerNormWeightsPin.IsValid() && !inputLayerNormWeightsPin.IsOptional())
+        || (!forgetLayerNormWeightsPin.IsValid() && !forgetLayerNormWeightsPin.IsOptional())
+        || (!cellLayerNormWeightsPin.IsValid() && !cellLayerNormWeightsPin.IsOptional())
+        || (!outputLayerNormWeightsPin.IsValid() && !outputLayerNormWeightsPin.IsOptional()))
+    {
+        return Fail("%s: Operation has invalid tensor inputs", __func__);
+    }
+
+    // Get the optional input scalars:
+    // 24: The cell clip: If provided, the cell state is clipped by this value prior to the cell output activation.
+    // 25: The projection clip: If provided and projection is enabled, this is used for clipping the projected values.
+
+    // Get the mandatory input scalars:
+    // 26: The scale of the intermediate result of matmul, i.e. input to layer normalization, at input gate.
+    // 27: The scale of the intermediate result of matmul, i.e. input to layer normalization, at forget gate.
+    // 28: The scale of the intermediate result of matmul, i.e. input to layer normalization, at cell gate.
+    // 29: The scale of the intermediate result of matmul, i.e. input to layer normalization, at output gate.
+    // 30: The zero point of the hidden state, i.e. input to projection.
+    // 31: The scale of the hidden state, i.e. input to projection.
+    float cellClip, projClip, matMulInputGate, matMulForgetGate, matMulCellGate, matMulOutputGate, projInputScale;
+    int projInputZeroPoint;
+
+    if (!GetInputScalar(operation, 24, OperandType::FLOAT32, cellClip, model, data, true) ||
+        !GetInputScalar(operation, 25, OperandType::FLOAT32, projClip, model, data, true) ||
+        !GetInputScalar(operation, 26, OperandType::FLOAT32, matMulInputGate, model, data) ||
+        !GetInputScalar(operation, 27, OperandType::FLOAT32, matMulForgetGate, model, data) ||
+        !GetInputScalar(operation, 28, OperandType::FLOAT32, matMulCellGate, model, data) ||
+        !GetInputScalar(operation, 29, OperandType::FLOAT32, matMulOutputGate, model, data) ||
+        !GetInputScalar(operation, 30, OperandType::INT32, projInputZeroPoint, model, data) ||
+        !GetInputScalar(operation, 31, OperandType::FLOAT32, projInputScale, model, data))
+    {
+        return Fail("%s: Operation has invalid scalar inputs", __func__);
+    }
+
+    // Outputs:
+    // 0: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size,
+    // output_size].
+    const Operand* outputStateOut = GetOutputOperand(operation, 0, model);
+    if (!outputStateOut)
+    {
+        return Fail("%s: Could not read output 0: outputStateOut", __func__);
+    }
+
+    // 1: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
+    const Operand* cellStateOut = GetOutputOperand(operation, 1, model);
+    if (!cellStateOut)
+    {
+        return Fail("%s: Could not read output 1: cellStateOut", __func__);
+    }
+
+    // 2: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size, output_size].
+    // This is effectively the same as the current “output state (out)” value.
+    const Operand* output = GetOutputOperand(operation, 2, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 2: output", __func__);
+    }
+
+    // set the params structure for the AddLstmLayer call
+    LstmInputParams params;
+    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
+    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
+    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
+    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
+    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
+    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
+    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
+    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
+    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
+    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
+    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
+    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
+    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
+    params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
+    params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
+    params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
+    params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
+
+    // set the layer descriptor
+    QLstmDescriptor desc;
+    desc.m_CellClip = cellClip;
+    desc.m_ProjectionClip = projClip;
+    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
+                          params.m_RecurrentToInputWeights == nullptr ||
+                          params.m_InputGateBias == nullptr);
+    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
+                              params.m_CellToOutputWeights != nullptr);
+    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
+    desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
+                               params.m_ForgetLayerNormWeights != nullptr ||
+                               params.m_CellLayerNormWeights != nullptr ||
+                               params.m_OutputLayerNormWeights != nullptr);
+    desc.m_InputIntermediateScale = matMulInputGate;
+    desc.m_ForgetIntermediateScale = matMulForgetGate;
+    desc.m_CellIntermediateScale = matMulCellGate;
+    desc.m_OutputIntermediateScale = matMulOutputGate;
+    desc.m_HiddenStateScale = projInputScale;
+    desc.m_HiddenStateZeroPoint = projInputZeroPoint;
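+    // Inputs 26-29 provide the scales of the layer-normalization inputs at each gate, while
+    // inputs 30-31 provide the zero point and scale of the hidden state fed into the projection.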
+
+    // validate the optional input groups
+    if (desc.m_CifgEnabled &&
+        (params.m_InputToInputWeights != nullptr ||
+         params.m_RecurrentToInputWeights != nullptr ||
+         params.m_InputGateBias != nullptr))
+    {
+        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
+                    " and input gate bias must be provided", __func__);
+    }
+
+    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
+    {
+        return Fail("%s: projection bias should not be provided without projection weights", __func__);
+    }
+
+    if (desc.m_PeepholeEnabled &&
+        (params.m_CellToForgetWeights == nullptr ||
+         params.m_CellToOutputWeights == nullptr ||
+         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
+    {
+        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
+                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
+    }
+
+    if (desc.m_LayerNormEnabled &&
+        (params.m_ForgetLayerNormWeights == nullptr ||
+         params.m_CellLayerNormWeights == nullptr ||
+         params.m_OutputLayerNormWeights == nullptr ||
+         (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
+    {
+        return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
+                    " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
+    }
+
+    // Basic parameters
+    LstmInputParamsInfo paramsInfo;
+    paramsInfo.m_InputToForgetWeights     = &(params.m_InputToForgetWeights->GetInfo());
+    paramsInfo.m_InputToCellWeights       = &(params.m_InputToCellWeights->GetInfo());
+    paramsInfo.m_InputToOutputWeights     = &(params.m_InputToOutputWeights->GetInfo());
+    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
+    paramsInfo.m_RecurrentToCellWeights   = &(params.m_RecurrentToCellWeights->GetInfo());
+    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
+    paramsInfo.m_ForgetGateBias           = &(params.m_ForgetGateBias->GetInfo());
+    paramsInfo.m_CellBias                 = &(params.m_CellBias->GetInfo());
+    paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());
+
+    // Inputs
+    const TensorInfo& inputInfo = input.GetTensorInfo();
+    const TensorInfo& outputStatePrevTimeStepInfo = outputStatePrevTimeStep.GetTensorInfo();
+    const TensorInfo& cellStatePrevTimeStepInfo = cellStatePrevTimeStep.GetTensorInfo();
+
+    // Outputs
+    TensorInfo outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
+    TensorInfo outputInfo = GetTensorInfoForOperand(*output);
+    const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
+
+    // Optional parameters
+    if (!desc.m_CifgEnabled)
+    {
+        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
+        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
+        if (desc.m_PeepholeEnabled)
+        {
+            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
+        }
+        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
+    }
+
+    if (desc.m_ProjectionEnabled)
+    {
+        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
+        if (params.m_ProjectionBias != nullptr)
+        {
+            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
+        }
+    }
+    else
+    {
+        // If projection is disabled, override the non-const output infos with the hidden-state
+        // quantization parameters; const copies of the adjusted infos are created below.
+        outputStateOutInfo.SetQuantizationScale(projInputScale);
+        outputStateOutInfo.SetQuantizationOffset(projInputZeroPoint);
+        outputInfo.SetQuantizationScale(projInputScale);
+        outputInfo.SetQuantizationOffset(projInputZeroPoint);
+    }
+
+    const TensorInfo constOutputStateOutInfo(outputStateOutInfo);
+    const TensorInfo constOutputInfo(outputInfo);
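+    // Const copies are taken after the optional override above so that the (possibly adjusted)
+    // infos can be passed by pointer to SetupAndTrackLayerOutputSlot further down.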
+
+    if (desc.m_PeepholeEnabled)
+    {
+        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
+        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
+    }
+
+    if (desc.m_LayerNormEnabled)
+    {
+        if(!desc.m_CifgEnabled)
+        {
+            paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
+        }
+        paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
+        paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
+        paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
+    }
+
+    // Check if the layer is supported
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& cellStateOutInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsQLstmSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputStatePrevTimeStepInfo,
+                                   cellStatePrevTimeStepInfo,
+                                   constOutputStateOutInfo,
+                                   cellStateOutInfo,
+                                   constOutputInfo,
+                                   desc,
+                                   paramsInfo);
+    };
+
+    bool isDynamic = false;
+    if (!IsDynamicTensor(constOutputStateOutInfo) &&
+        !IsDynamicTensor(cellStateOutInfo)  &&
+        !IsDynamicTensor(constOutputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isDynamic = true;
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    // Add the layer
+    IConnectableLayer* layer = data.m_Network->AddQLstmLayer(desc, params, "QLstm");
+
+    input.Connect(layer->GetInputSlot(0));
+    outputStatePrevTimeStep.Connect(layer->GetInputSlot(1));
+    cellStatePrevTimeStep.Connect(layer->GetInputSlot(2));
+
+    if (!isDynamic)
+    {
+        return ( SetupAndTrackLayerOutputSlot(
+                operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
+                 SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
+                 SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data, &constOutputInfo));
+    }
+    else
+    {
+        return ( SetupAndTrackLayerOutputSlot(
+                operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
+                 SetupAndTrackLayerOutputSlot(
+                         operation, 1, *layer, 1, model, data, nullptr, validateFunc,
+                         ActivationFn::kActivationNone, true) &&
+                 SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data, &constOutputInfo));
+    }
+}
+
+bool Converter::ConvertQuantized16BitLstm(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertQuantized16BitLstm()";
+    VLOG(DRIVER) << "Policy::ConvertQuantized16BitLstm()";
+
+    // Inputs:
+    // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
+    //    specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0: input", __func__);
+    }
+
+    //13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
+    //    [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
+    //    It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
+    LayerInputHandle previousCellStateIn = ConvertToLayerInputHandle(operation, 13, model, data);
+    if (!previousCellStateIn.IsValid())
+    {
+        return Fail("%s: Could not read input 13: previousCellStateIn", __func__);
+    }
+
+    // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
+    //     [numBatches, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
+    //     is quantized with a fixed quantization range of -1, 127/128.
+    LayerInputHandle previousOutputIn = ConvertToLayerInputHandle(operation, 14, model, data);
+    if (!previousOutputIn.IsValid())
+    {
+        return Fail("%s: Could not read input 14: previousOutputIn", __func__);
+    }
+
+    // Get the input tensors:
+    // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
+    //    [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
+    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
+    const ConstTensorPin inputToInputWeightsPin =
+        ConvertOperationInputToConstTensorPin(operation, 1, model, data);
+
+    // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
+    //    [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
+    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
+    const ConstTensorPin inputToForgetWeightsPin =
+        ConvertOperationInputToConstTensorPin(operation, 2, model, data);
+
+    // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
+    //    [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
+    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
+    const ConstTensorPin inputToCellWeightsPin =
+        ConvertOperationInputToConstTensorPin(operation, 3, model, data);
+
+    // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
+    //    [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
+    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
+    const ConstTensorPin inputToOutputWeightsPin =
+        ConvertOperationInputToConstTensorPin(operation, 4, model, data);
+
+    // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
+    //    [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
+    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
+    const ConstTensorPin recurrentToInputWeightsPin =
+        ConvertOperationInputToConstTensorPin(operation, 5, model, data);
+
+    // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
+    //    [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
+    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
+    const ConstTensorPin recurrentToForgetWeightsPin =
+        ConvertOperationInputToConstTensorPin(operation, 6, model, data);
+
+    // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
+    //    [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
+    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
+    const ConstTensorPin recurrentToCellWeightsPin =
+        ConvertOperationInputToConstTensorPin(operation, 7, model, data);
+
+    // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
+    //    [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
+    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
+    const ConstTensorPin recurrentToOutputWeightsPin =
+        ConvertOperationInputToConstTensorPin(operation, 8, model, data);
+
+    // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
+    //    bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
+    //    of input and weights scales and zeroPoint equal to 0.
+    const ConstTensorPin inputGateBiasPin =
+        ConvertOperationInputToConstTensorPin(operation, 9, model, data);
+
+    // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
+    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
+    //     of input and weights scales and zeroPoint equal to 0.
+    const ConstTensorPin forgetGateBiasPin =
+        ConvertOperationInputToConstTensorPin(operation, 10, model, data);
+
+    // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
+    //    for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
+    //    and weights scales and zeroPoint equal to 0.
+    const ConstTensorPin cellBiasPin =
+        ConvertOperationInputToConstTensorPin(operation, 11, model, data);
+
+    // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
+    //    the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
+    //    of input and weights scales and zeroPoint equal to 0.
+    const ConstTensorPin outputGateBiasPin =
+        ConvertOperationInputToConstTensorPin(operation, 12, model, data);
+
+    if (!inputToInputWeightsPin.IsValid() ||
+        !inputToForgetWeightsPin.IsValid() ||
+        !inputToCellWeightsPin.IsValid() ||
+        !inputToOutputWeightsPin.IsValid() ||
+        !recurrentToInputWeightsPin.IsValid() ||
+        !recurrentToForgetWeightsPin.IsValid() ||
+        !recurrentToCellWeightsPin.IsValid() ||
+        !recurrentToOutputWeightsPin.IsValid() ||
+        !inputGateBiasPin.IsValid() ||
+        !forgetGateBiasPin.IsValid() ||
+        !cellBiasPin.IsValid() ||
+        !outputGateBiasPin.IsValid())
+    {
+        return Fail("%s: Operation has invalid tensor inputs", __func__);
+    }
+
+    // Outputs:
+    // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
+    //    which contains a cell state from the current time step. Tensor is quantized using a quantization range
+    //    of -2^4, 2^4 * 32767/32768.
+    const Operand* cellStateOut = GetOutputOperand(operation, 0, model);
+    if (!cellStateOut)
+    {
+        return Fail("%s: Could not read output 0: cellStateOut", __func__);
+    }
+
+    // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
+    //      contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
+    const Operand* output = GetOutputOperand(operation, 1, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 1: output", __func__);
+    }
+
+    // Inputs
+    const TensorInfo& inputInfo               = input.GetTensorInfo();
+    const TensorInfo& previousCellStateInInfo = previousCellStateIn.GetTensorInfo();
+    const TensorInfo& previousOutputInInfo    = previousOutputIn.GetTensorInfo();
+
+    // Outputs
+    const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
+    const TensorInfo& outputInfo       = GetTensorInfoForOperand(*output);
+
+    // Dynamic tensors currently not supported
+    if (IsDynamicTensor(cellStateOutInfo) || IsDynamicTensor(outputInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
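+    // Pack the constant weight and bias tensors into the parameter struct expected by the ArmNN QuantizedLstm layer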
+    QuantizedLstmInputParams params;
+
+    params.m_InputToInputWeights      = inputToInputWeightsPin.GetConstTensorPtr();
+    params.m_InputToForgetWeights     = inputToForgetWeightsPin.GetConstTensorPtr();
+    params.m_InputToCellWeights       = inputToCellWeightsPin.GetConstTensorPtr();
+    params.m_InputToOutputWeights     = inputToOutputWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToInputWeights  = recurrentToInputWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToCellWeights   = recurrentToCellWeightsPin.GetConstTensorPtr();
+    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
+    params.m_InputGateBias            = inputGateBiasPin.GetConstTensorPtr();
+    params.m_ForgetGateBias           = forgetGateBiasPin.GetConstTensorPtr();
+    params.m_CellBias                 = cellBiasPin.GetConstTensorPtr();
+    params.m_OutputGateBias           = outputGateBiasPin.GetConstTensorPtr();
+
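+    // paramsInfo mirrors params but holds TensorInfo pointers, which is what the backend support check consumes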
+    QuantizedLstmInputParamsInfo paramsInfo;
+    paramsInfo.m_InputToInputWeights      = &(params.m_InputToInputWeights->GetInfo());
+    paramsInfo.m_InputToForgetWeights     = &(params.m_InputToForgetWeights->GetInfo());
+    paramsInfo.m_InputToCellWeights       = &(params.m_InputToCellWeights->GetInfo());
+    paramsInfo.m_InputToOutputWeights     = &(params.m_InputToOutputWeights->GetInfo());
+    paramsInfo.m_RecurrentToInputWeights  = &(params.m_RecurrentToInputWeights->GetInfo());
+    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
+    paramsInfo.m_RecurrentToCellWeights   = &(params.m_RecurrentToCellWeights->GetInfo());
+    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
+    paramsInfo.m_InputGateBias            = &(params.m_InputGateBias->GetInfo());
+    paramsInfo.m_ForgetGateBias           = &(params.m_ForgetGateBias->GetInfo());
+    paramsInfo.m_CellBias                 = &(params.m_CellBias->GetInfo());
+    paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsQuantizedLstmSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   previousCellStateInInfo,
+                                   previousOutputInInfo,
+                                   cellStateOutInfo,
+                                   outputInfo,
+                                   paramsInfo);
+    };
+
+    bool isDynamic = false;
+    if (!IsDynamicTensor(cellStateOutInfo) &&
+        !IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isDynamic = true;
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
+    input.Connect(layer->GetInputSlot(0));
+    previousCellStateIn.Connect(layer->GetInputSlot(1));
+    previousOutputIn.Connect(layer->GetInputSlot(2));
+
+    if (!isDynamic)
+    {
+        return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
+                SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data));
+    }
+    else
+    {
+        return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
+                SetupAndTrackLayerOutputSlot(
+                    operation, 1, *layer, 1, model, data, nullptr, validateFunc, ActivationFn::kActivationNone, true));
+    }
+}
+
+bool Converter::ConvertRank(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertRank()";
+
+    const Operand* inputOperand = GetInputOperand(operation, 0, model);
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+
+    if (inputOperand == nullptr || outputOperand == nullptr)
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Shape inputOperandShape = GetOperandShape(*inputOperand);
+    const Shape outputOperandShape = GetOperandShape(*outputOperand);
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0", __func__);
+    }
+
+    armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
+    if (IsDynamicTensor(outInfo))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsRankSupported,
+                               data.m_Backends,
+                               isSupported,
+                               input.GetTensorInfo(),
+                               outInfo);
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddRankLayer();
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, &outInfo);
+}
+
+bool Converter::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertReLu()";
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::ReLu;
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Input 0 is invalid", "operationName");
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return false;
+    }
+
+    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
+
+    bool isSupported = false;
+
+    auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsActivationSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input.GetTensorInfo(),
+                                   outInfo,
+                                   desc);
+    };
+
+    if(IsDynamicTensor(outInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(desc);
+    ARMNN_ASSERT(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertReLu1()";
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
+    desc.m_A        = 1.0f;
+    desc.m_B        = -1.0f;
+
+    return ConvertToActivation(operation, __func__, desc, model, data);
+}
+
+bool Converter::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertReLu6()";
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
+    desc.m_A        = 6.0f;
+
+    return ConvertToActivation(operation, __func__, desc, model, data);
+}
+
+bool Converter::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertReshape()";
+
+    const Operand* inputOperand = GetInputOperand(operation, 0, model);
+    const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+
+    if (inputOperand == nullptr
+        || requestedShapeOperand == nullptr
+        || outputOperand == nullptr)
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    if (requestedShapeOperand->dimensions.size() != 1)
+    {
+        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
+                    __func__, requestedShapeOperand->dimensions.size());
+    }
+
+    std::vector<int32_t> targetDimensions;
+    if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
+    {
+        return Fail("%s: Could not read values of input 1", __func__);
+    }
+
+    const Shape inputOperandShape = GetOperandShape(*inputOperand);
+
+    Shape requestedShape;
+    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
+    // function that resolves these values into a fully specified tensor shape.
+    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
+    {
+        return Fail("%s: Failed to resolve the requested shape", __func__);
+    }
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0", __func__);
+    }
+
+    armnn::ReshapeDescriptor reshapeDescriptor;
+    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
+                                                         requestedShape.dimensions.data());
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsReshapeSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input.GetTensorInfo(),
+                                   outputInfo,
+                                   reshapeDescriptor);
+    };
+
+    if(!IsDynamicTensor(outputInfo))
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    else
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertResize(const Operation& operation,
+                              const Model& model,
+                              ConversionData& data,
+                              ResizeMethod resizeMethod)
+{
+    VLOG(DRIVER) << "Converter::ConvertResize()";
+    VLOG(DRIVER) << "resizeMethod = " << GetResizeMethodAsCString(resizeMethod);
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Could not read input 0", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const TensorInfo& inputInfo  = input.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    ResizeDescriptor descriptor;
+    descriptor.m_Method     = resizeMethod;
+    descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
+
+    OperandType operandType1;
+    OperandType operandType2;
+
+    if (!GetOperandType(operation, 1, model, operandType1) ||
+        !GetOperandType(operation, 2, model, operandType2))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    if (operandType1 != operandType2)
+    {
+        return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
+    }
+
+    if (operandType1 == OperandType::INT32)
+    {
+        // Case 1: resizing by shape
+        int32_t targetWidth  = 0;
+        int32_t targetHeight = 0;
+
+        if (!GetInputInt32(operation, 1, targetWidth, model, data) ||
+            !GetInputInt32(operation, 2, targetHeight, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
+        }
+
+        if (targetWidth < 0 || targetHeight < 0)
+        {
+            return Fail("%s: Operation has invalid inputs for resizing by shape. "
+                        "Target width/height cannot be < 0", __func__);
+        }
+
+        descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
+        descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
+    }
+    else if (operandType1 == OperandType::FLOAT32)
+    {
+        // Case 2: resizing by scale
+        float widthScale  = 1.0f;
+        float heightScale = 1.0f;
+
+        if (!GetInputFloat32(operation, 1, widthScale, model, data) ||
+            !GetInputFloat32(operation, 2, heightScale, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
+        }
+
+        const TensorShape& inputShape = inputInfo.GetShape();
+        armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
+
+        float width  = inputShape[dataLayoutIndexed.GetWidthIndex()];
+        float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
+
+        descriptor.m_TargetWidth  = std::floor(width  * widthScale);
+        descriptor.m_TargetHeight = std::floor(height * heightScale);
+    }
+    else if (operandType1 == OperandType::FLOAT16)
+    {
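+        // Case 3: resizing by scale, using half-precision (FLOAT16) scale factors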
+        Half widthScale;
+        Half heightScale;
+
+        if (!GetInputScalar(operation, 1, OperandType::FLOAT16, widthScale, model, data) ||
+            !GetInputScalar(operation, 2, OperandType::FLOAT16, heightScale, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
+        }
+
+        const TensorShape& inputShape = inputInfo.GetShape();
+        armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
+
+        Half width  = static_cast<Half>(inputShape[dataLayoutIndexed.GetWidthIndex()]);
+        Half height = static_cast<Half>(inputShape[dataLayoutIndexed.GetHeightIndex()]);
+
+        descriptor.m_TargetWidth  = std::floor(width  * widthScale);
+        descriptor.m_TargetHeight = std::floor(height * heightScale);
+    }
+    else
+    {
+        return Fail("%s: Operand has invalid data type for resizing by scale", __func__);
+    }
+
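+    // Optional inputs 4 and 5 control the align-corners and half-pixel-centers behaviour of the resize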
+    descriptor.m_AlignCorners     = GetOptionalBool(operation, 4, model, data);
+    descriptor.m_HalfPixelCenters = GetOptionalBool(operation, 5, model, data);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsResizeSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertSpaceToBatchNd()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if(!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo &inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    unsigned int spatialDim = rank - 2;
+
+    if (rank != 4)
+    {
+        return Fail("%s: Only inputs with rank 4 are supported", __func__);
+    }
+
+    const Operand *output = GetOutputOperand(operation, 0, model);
+    if(!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo &outputInfo = GetTensorInfoForOperand(*output);
+
+    const Operand *blockShapeOperand = GetInputOperand(operation, 1, model);
+    const Operand *paddingsOperand = GetInputOperand(operation, 2, model);
+
+    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
+    if(blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
+    {
+        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
+    }
+
+    std::vector<int32_t> blockShape;
+    if(!GetTensorInt32Values(*blockShapeOperand, blockShape, model, data))
+    {
+        return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
+    }
+    if(std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i)
+    { return i < 1; }))
+    {
+        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
+    }
+
+    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
+    if(paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
+    {
+        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
+    }
+
+    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
+    std::vector<int32_t> paddings;
+    if(!GetTensorInt32Values(*paddingsOperand, paddings, model, data))
+    {
+        return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
+    }
+    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
+    {
+        int paddingBeforeInput = paddings[i];
+        int paddingAfterInput = paddings[i + 1];
+        if(paddingBeforeInput < 0 || paddingAfterInput < 0)
+        {
+            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
+        }
+
+        paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
+    }
+
+    armnn::SpaceToBatchNdDescriptor descriptor;
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
+    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
+
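+    // On 1.2-or-later operands an optional data layout operand (input 3) may override the default NHWC layout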
+    if(Is12OrLaterOperand(*output))
+    {
+        descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
+    }
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo &outputInfo, bool &isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsSpaceToBatchNdSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if(!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer *const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertSpaceToDepth()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid() )
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank != 4)
+    {
+        return Fail("%s: Only inputs with rank 4 are supported", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    SpaceToDepthDescriptor desc;
+
+    GetInputScalar(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
+
+    if (desc.m_BlockSize <= 1)
+    {
+        return Fail("%s: Block size must be greater than 1 in all dimensions", __func__);
+    }
+
+    desc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsSpaceToDepthSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   desc);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertSoftmax()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
+    if (!outputOperand)
+    {
+        return Fail("%s: Operation has no outputs", __func__);
+    }
+
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
+
+    SoftmaxDescriptor desc;
+    OperandType outputType = outputOperand->type;
+
+    // Read beta value
+    if (outputType == OperandType::TENSOR_FLOAT16)
+    {
+        Half value;
+
+        if (!GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+
+        desc.m_Beta = static_cast<float>(value);
+    }
+    else
+    {
+        if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
+        }
+    }
+
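+    // Input 2 (axis) is optional; when present it selects the dimension the softmax is computed over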
+    if (operation.inputs.size() > 2 &&
+        !GetInputScalar(operation, 2, OperandType::INT32, desc.m_Axis, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsSoftmaxSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input.GetTensorInfo(),
+                                   outputInfo,
+                                   desc);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertSub()";
+
+    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
+    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
+
+    if (!input0.IsValid() || !input1.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    // The FuseActivation parameter is always the input index 2
+    // and it should be optional
+    ActivationFn activationFunction;
+    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsSubtractionSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   input0.GetTensorInfo(),
+                                   input1.GetTensorInfo(),
+                                   outputInfo);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
+
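+    // BroadcastTensor reshapes the lower-rank input (if needed) so the two inputs can be broadcast together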
+    bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
+    if (!isReshapeSupported)
+    {
+        return false;
+    }
+    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
+                                        data, nullptr, validateFunc, activationFunction);
+}
+
+bool Converter::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertTanH()";
+
+    armnn::ActivationDescriptor desc;
+    desc.m_Function = armnn::ActivationFunction::TanH;
+    desc.m_A = 1.0f; // android nn does not support tanH parameters
+    desc.m_B = 1.0f; // set to 1.0f for unity scaling
+
+    return ConvertToActivation(operation, __func__, desc, model, data);
+}
+
+bool Converter::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertTransposeConv2d()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const TensorInfo& inputInfo  = input.GetTensorInfo();
+    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    // ArmNN does not currently support non-fixed weights or bias
+    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
+    const Operand* weightsOperand = GetInputOperand(operation, 1, model);
+
+    if (weightsOperand == nullptr)
+    {
+        return Fail("%s: Operand is invalid", __func__);
+    }
+    TransposeConvolution2dDescriptor desc;
+    desc.m_DataLayout = DataLayout::NHWC;
+
+    // Determine whether padding is implicit or explicit
+    bool implicitPadding = operation.inputs.size() == 9;
+
+    if (implicitPadding)
+    {
+        desc.m_DataLayout = OptionalDataLayout(operation, 8, model, data);
+    }
+    else
+    {
+        desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
+    }
+
+    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
+    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
+    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
+
+    const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
+
+    // The shape of the weight is [depth_out, filter_height, filter_width, depth_in].
+    // We have to permute it to OIHW if the data layout is NCHW.
+    const ConstTensorPin weightsPin = (desc.m_DataLayout == DataLayout::NCHW) ?
+                                      ConvertOperationInputToConstTensorPin(operation, 1,
+                                                                                       model, data, OHWIToOIHW) :
+                                      ConvertOperationInputToConstTensorPin(operation, 1, model, data);
+
+    // Bias is a 1D tensor
+    const ConstTensorPin biasPin =
+        ConvertOperationInputToConstTensorPin(operation, 2, model, data);
+
+    if (!weightsPin.IsValid())
+    {
+        return Fail("%s: Operation has invalid weights", __func__);
+    }
+
+    if (!biasPin.IsValid())
+    {
+        return Fail("%s: Operation has invalid biases", __func__);
+    }
+
+    ConstTensor weights = weightsPin.GetConstTensor();
+    ConstTensor bias = biasPin.GetConstTensor();
+    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
+
+    ActivationFn activation;
+
+    if (implicitPadding)
+    {
+        int32_t strideX{0};
+        int32_t strideY{0};
+        int32_t padLeft{0};
+        int32_t padRight{0};
+        int32_t padTop{0};
+        int32_t padBottom{0};
+
+        ::android::nn::PaddingScheme paddingScheme;
+        if (!GetInputPaddingScheme(operation, 4, paddingScheme, model, data) ||
+            !GetInputScalar(operation, 5, OperandType::INT32, strideX, model, data) ||
+            !GetInputScalar(operation, 6, OperandType::INT32, strideY, model, data) ||
+            !GetInputActivationFunction(operation, 7, activation, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
+        }
+
+        const uint32_t kernelX = weights.GetShape()[widthIndex];
+        const uint32_t kernelY = weights.GetShape()[heightIndex];
+
+        // If output shape has been specified as a parameter then extract it and make it available.
+        const Operand* outputShapeOperand = GetInputOperand(operation, 3, model, false);
+        std::vector<int32_t> outputShape;
+        if ((outputShapeOperand) && (GetTensorInt32Values(*outputShapeOperand, outputShape, model, data)))
+        {
+            // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
+            for (int dimension : outputShape)
+            {
+                desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
+            }
+            desc.m_OutputShapeEnabled = true;
+        }
+
+        uint32_t outputX;
+        uint32_t outputY;
+
+        if (IsDynamicTensor(outputInfo))
+        {
+            if (outputShape.size() == 0)
+            {
+                return Fail("%s: Padding sizes cannot be inferred", __func__);
+            }
+
+            outputX = outputShape[widthIndex];
+            outputY = outputShape[heightIndex];
+        }
+        else
+        {
+            outputX = outputInfo.GetShape()[widthIndex];
+            outputY = outputInfo.GetShape()[heightIndex];
+        }
+
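+        // Derive the transpose convolution padding from the output size, kernel size and stride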
+        CalcPaddingTransposeConv(outputX, kernelX, strideX, padLeft, padRight, paddingScheme);
+        CalcPaddingTransposeConv(outputY, kernelY, strideY, padTop, padBottom, paddingScheme);
+
+        // NOTE: The Android NN API allows for negative padding values in TransposeConv2d,
+        // but Arm NN only supports values >= 0
+        if (padLeft < 0 || padRight < 0 || padTop < 0 || padBottom < 0)
+        {
+            return Fail("%s: Negative padding values are not supported", __func__);
+        }
+
+        desc.m_StrideX   = armnn::numeric_cast<uint32_t>(strideX);
+        desc.m_StrideY   = armnn::numeric_cast<uint32_t>(strideY);
+        desc.m_PadLeft   = armnn::numeric_cast<uint32_t>(padLeft);
+        desc.m_PadRight  = armnn::numeric_cast<uint32_t>(padRight);
+        desc.m_PadTop    = armnn::numeric_cast<uint32_t>(padTop);
+        desc.m_PadBottom = armnn::numeric_cast<uint32_t>(padBottom);
+    }
+    else if (operation.inputs.size() == 11)
+    {
+        // explicit padding
+        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
+            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
+            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
+            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
+            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
+            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
+            !GetInputActivationFunction(operation,  9, activation, model, data))
+        {
+            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
+        }
+    }
+    else
+    {
+        return Fail("%s: Unsupported number of operation inputs", __func__);
+    }
+
+    desc.m_BiasEnabled = true;
+    Optional<TensorInfo> biases(bias.GetInfo());
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsTransposeConvolution2dSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   desc,
+                                   weights.GetInfo(),
+                                   biases);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    IConnectableLayer* startLayer =
+        data.m_Network->AddTransposeConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
+    if (!startLayer)
+    {
+        return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
+    }
+
+    input.Connect(startLayer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
+                                                   data, nullptr, validateFunc, activation);
+}
+
+bool Converter::ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertSqrt()";
+    ActivationDescriptor desc;
+    desc.m_Function = ActivationFunction::Sqrt;
+
+    return ::ConvertToActivation(operation, __func__, desc, model, data);
+}
+
+bool Converter::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertSqueeze()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
+    {
+        return Fail("%s: Dynamic output tensors are not supported", __func__);
+    }
+
+    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
+    // if the operand index is out of bounds.
+    const Operand* axisOperand = GetInputOperand(operation, 1, model, false);
+
+    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
+
+    std::vector<int32_t> axis;
+    if (!axisOperand)
+    {
+        axis.assign(dimensionSequence,
+                    dimensionSequence + rank);
+    }
+    else if (!GetTensorInt32Values(*axisOperand, axis, model, data))
+    {
+        return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
+    }
+
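+    // Build the squeezed output shape: keep a dimension if it is not listed in axis or if its size is not 1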
+    std::vector<uint32_t> outputDims;
+    for (unsigned int i = 0; i < rank; i++)
+    {
+        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
+        auto currentDimension = inputInfo.GetShape()[i];
+        if (skipSqueeze || currentDimension != 1)
+        {
+            outputDims.push_back(currentDimension);
+        }
+    }
+
+    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
+
+    armnn::TensorInfo outputInfo = inputInfo;
+    outputInfo.SetShape(outShape);
+
+    armnn::ReshapeDescriptor reshapeDesc;
+    reshapeDesc.m_TargetShape = outputInfo.GetShape();
+
+    bool isSupported = false;
+    FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                               IsReshapeSupported,
+                               data.m_Backends,
+                               isSupported,
+                               inputInfo,
+                               outputInfo,
+                               reshapeDesc);
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
+}
+
+bool Converter::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertStridedSlice()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+    }
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    const Operand* beginOperand   = GetInputOperand(operation, 1, model);
+    const Operand* endOperand     = GetInputOperand(operation, 2, model);
+    const Operand* stridesOperand = GetInputOperand(operation, 3, model);
+
+    std::vector<int32_t> beginValues;
+    std::vector<int32_t> endValues;
+    std::vector<int32_t> stridesValues;
+
+    // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
+    auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
+    {
+        if (!GetTensorInt32Values(operand, operandValues, model, data))
+        {
+            return false;
+        }
+
+        if (operandValues.size() != rank)
+        {
+            return false;
+        }
+
+        return true;
+    };
+
+    if (!ValidateInputOperands(*beginOperand, beginValues)
+        || !ValidateInputOperands(*endOperand, endValues)
+        || !ValidateInputOperands(*stridesOperand, stridesValues))
+    {
+        return Fail("%s: Operation has invalid input operand", __func__);
+    }
+
+    // Stride cannot have value '0'
+    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
+    {
+        return Fail("%s: Stride must be non-zero value.", __func__);
+    }
+
+    armnn::StridedSliceDescriptor descriptor;
+    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
+    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
+    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
+    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
+
+    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
+    if (!GetInputInt32(operation, 4, descriptor.m_BeginMask, model, data) ||
+        !GetInputInt32(operation, 5, descriptor.m_EndMask, model, data) ||
+        !GetInputInt32(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsStridedSliceSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   descriptor);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    // Check if the slice can fit in an inferred output
+    armnn::TensorShape inputShape = inputInfo.GetShape();
+    for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
+    {
+        int stride = descriptor.m_Stride[i];
+
+        if (descriptor.m_ShrinkAxisMask & (1 << i))
+        {
+            // If the difference between the start point and the end point of the slice on an axis being shrunk
+            // is greater than 1 then throw an error as the output will not be large enough to hold the slice
+            if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
+                || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
+            {
+                return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
+            }
+
+            if (stride < 0)
+            {
+                return Fail("%s: StridedSlice: Stride cannot be negative while ShrinkAxisMask is set.", __func__);
+            }
+        }
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+bool Converter::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
+{
+    VLOG(DRIVER) << "Converter::ConvertTranspose()";
+
+    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
+    if (!input.IsValid())
+    {
+        return Fail("%s: Operation has invalid inputs", __func__);
+    }
+
+    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
+    unsigned int rank = inputInfo.GetNumDimensions();
+    if (rank > 4)
+    {
+        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
+    }
+
+    // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
+    // if the operand index is out of bounds.
+    const Operand* permOperand = GetInputOperand(operation, 1, model, false);
+
+    std::vector<int32_t> perm(rank);
+    if (!permOperand || (permOperand->lifetime == OperandLifeTime::NO_VALUE))
+    {
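+        // No permutation operand supplied: default to reversing the dimension order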
+        for (unsigned int i = rank; i > 0; i--)
+        {
+            perm[rank - i] = armnn::numeric_cast<int> (i - 1);
+        }
+    }
+    else if (!GetTensorInt32Values(*permOperand, perm, model, data))
+    {
+        return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
+    }
+
+    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
+
+    armnn::TransposeDescriptor transposeDesc;
+    transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
+
+    const Operand* output = GetOutputOperand(operation, 0, model);
+    if (!output)
+    {
+        return Fail("%s: Could not read output 0", __func__);
+    }
+
+    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
+
+    bool isSupported = false;
+    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
+    {
+        FORWARD_LAYER_SUPPORT_FUNC(__func__,
+                                   IsTransposeSupported,
+                                   data.m_Backends,
+                                   isSupported,
+                                   inputInfo,
+                                   outputInfo,
+                                   transposeDesc);
+    };
+
+    if(IsDynamicTensor(outputInfo))
+    {
+        isSupported = AreDynamicTensorsSupported();
+    }
+    else
+    {
+        validateFunc(outputInfo, isSupported);
+    }
+
+    if (!isSupported)
+    {
+        return false;
+    }
+
+    armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
+    assert(layer != nullptr);
+    input.Connect(layer->GetInputSlot(0));
+
+    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
+}
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/Converter.hpp b/shim/sl/canonical/Converter.hpp
new file mode 100644
index 0000000..8549289
--- /dev/null
+++ b/shim/sl/canonical/Converter.hpp
@@ -0,0 +1,164 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "ConversionUtils.hpp"
+
+#include <nnapi/OperandTypes.h>
+#include <nnapi/Result.h>
+#include <nnapi/Types.h>
+
+#include <armnn/Types.hpp>
+using namespace armnn;
+
+namespace armnn_driver
+{
+
+class Converter
+{
+
+public:
+    using Model                     = ::android::nn::Model;
+    using Operand                   = ::android::nn::Operand;
+    using OperandLifeTime           = ::android::nn::Operand::LifeTime;
+    using OperandType               = ::android::nn::OperandType;
+    using Operation                 = ::android::nn::Operation;
+    using OperationType             = ::android::nn::OperationType;
+    using ErrorStatus               = ::android::nn::ErrorStatus;
+    static bool ConvertOperation(const Operation& operation, const Model& model, ConversionData& data);
+
+private:
+    static bool ConvertAdd(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertArgMinMax(const Operation& operation,
+                                 const Model& model,
+                                 ConversionData& data,
+                                 armnn::ArgMinMaxFunction argMinMaxFunction);
+
+    static bool ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertCast(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertComparison(const Operation& operation,
+                                  const Model& model,
+                                  ConversionData& data,
+                                  armnn::ComparisonOperation comparisonOperation);
+
+    static bool ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertDiv(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertElementwiseUnary(const Operation& operation,
+                                        const Model& model,
+                                        ConversionData& data,
+                                        armnn::UnaryOperation unaryOperation);
+
+    static bool ConvertElu(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertFill(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertFloor(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertGather(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertGroupedConv2d(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertHardSwish(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertLocalResponseNormalization(const Operation& operation,
+                                                  const Model& model,
+                                                  ConversionData& data);
+
+    static bool ConvertLogicalBinary(const Operation& operation,
+                                     const Model& model,
+                                     ConversionData& data,
+                                     armnn::LogicalBinaryOperation logicalOperation);
+
+    static bool ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertLstm(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertMean(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertMul(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertPad(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertQuantized16BitLstm(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertRank(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertReLu(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertReshape(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertResize(const Operation& operation,
+                              const Model& model,
+                              ConversionData& data,
+                              armnn::ResizeMethod resizeMethod);
+
+    static bool ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertSub(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertTanH(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data);
+
+    static bool ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data);
+};
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/DriverOptions.cpp b/shim/sl/canonical/DriverOptions.cpp
new file mode 100644
index 0000000..5c73edf
--- /dev/null
+++ b/shim/sl/canonical/DriverOptions.cpp
@@ -0,0 +1,323 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#define LOG_TAG "arm-armnn-sl"
+
+#include "DriverOptions.hpp"
+
+#include "CanonicalUtils.hpp"
+
+#include <armnn/Version.hpp>
+#include <log/log.h>
+#include "SystemPropertiesUtils.hpp"
+
+#include <OperationsUtils.h>
+
+#include <cxxopts/cxxopts.hpp>
+
+#include <algorithm>
+#include <cassert>
+#include <cstdlib>
+#include <functional>
+#include <regex>
+#include <string>
+#include <sstream>
+
+using namespace android;
+using namespace std;
+
+namespace armnn_driver
+{
+
+DriverOptions::DriverOptions(armnn::Compute computeDevice, bool fp16Enabled)
+    : m_Backends({computeDevice})
+    , m_VerboseLogging(false)
+    , m_RequestInputsAndOutputsDumpDir(std::string(""))
+    , m_ServiceName(std::string("armnn_sl"))
+    , m_ForcedUnsupportedOperations({})
+    , m_ClTunedParametersMode(armnn::IGpuAccTunedParameters::Mode::UseTunedParameters)
+    , m_ClTuningLevel(armnn::IGpuAccTunedParameters::TuningLevel::Rapid)
+    , m_EnableGpuProfiling(false)
+    , m_fp16Enabled(fp16Enabled)
+    , m_FastMathEnabled(false)
+    , m_ShouldExit(false)
+    , m_ExitCode(EXIT_SUCCESS)
+    , m_CachedNetworkFilePath(std::string(""))
+    , m_SaveCachedNetwork(false)
+    , m_NumberOfThreads(0)
+    , m_EnableAsyncModelExecution(false)
+    , m_ArmnnNumberOfThreads(1)
+{
+}
+
+DriverOptions::DriverOptions(const std::vector<armnn::BackendId>& backends, bool fp16Enabled)
+    : m_Backends(backends)
+    , m_VerboseLogging(false)
+    , m_RequestInputsAndOutputsDumpDir(std::string(""))
+    , m_ServiceName(std::string("armnn_sl"))
+    , m_ForcedUnsupportedOperations({})
+    , m_ClTunedParametersMode(armnn::IGpuAccTunedParameters::Mode::UseTunedParameters)
+    , m_ClTuningLevel(armnn::IGpuAccTunedParameters::TuningLevel::Rapid)
+    , m_EnableGpuProfiling(false)
+    , m_fp16Enabled(fp16Enabled)
+    , m_FastMathEnabled(false)
+    , m_ShouldExit(false)
+    , m_ExitCode(EXIT_SUCCESS)
+    , m_CachedNetworkFilePath(std::string(""))
+    , m_SaveCachedNetwork(false)
+    , m_NumberOfThreads(0)
+    , m_EnableAsyncModelExecution(false)
+    , m_ArmnnNumberOfThreads(1)
+{
+}
+
+// This default constructor will examine an environment variable called
+// ARMNN_SL_OPTIONS. It will parse the parameters using the existing cxxopts
+// mechanism.
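+// For example (hypothetical value): ARMNN_SL_OPTIONS="-v -c GpuAcc,CpuAcc"
+// turns on verbose logging and selects the GpuAcc and CpuAcc backends.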
+DriverOptions::DriverOptions()
+    : m_VerboseLogging(false)
+    , m_RequestInputsAndOutputsDumpDir(std::string(""))
+    , m_ServiceName(std::string("armnn_sl"))
+    , m_ForcedUnsupportedOperations({})
+    , m_ClTunedParametersMode(armnn::IGpuAccTunedParameters::Mode::UseTunedParameters)
+    , m_ClTuningLevel(armnn::IGpuAccTunedParameters::TuningLevel::Rapid)
+    , m_EnableGpuProfiling(false)
+    , m_fp16Enabled(false)
+    , m_FastMathEnabled(false)
+    , m_ShouldExit(false)
+    , m_ExitCode(EXIT_SUCCESS)
+    , m_SaveCachedNetwork(false)
+    , m_NumberOfThreads(0)
+    , m_EnableAsyncModelExecution(false)
+    , m_ArmnnNumberOfThreads(1)
+{
+    std::string unsupportedOperationsAsString;
+    std::string clTunedParametersModeAsString;
+    std::string clTuningLevelAsString;
+    std::vector<std::string> backends;
+    bool showHelp = false;
+    bool showVersion = false;
+
+    const char* rawEnv = std::getenv("ARMNN_SL_OPTIONS");
+    // If the environment variable isn't set we'll continue as if it were an empty string.
+    if (!rawEnv)
+    {
+        rawEnv = "";
+    }
+    string optionsAsString(rawEnv);
+    regex whiteSpaceRegex("\\s+");
+    // Tokenize the string based on whitespace.
+    sregex_token_iterator iter(optionsAsString.begin(), optionsAsString.end(), whiteSpaceRegex, -1);
+    sregex_token_iterator end;
+    vector<string> cliAsVector(iter, end);
+    // As we're pretending to be a command line, argv[0] should be an executable name.
+    cliAsVector.insert(cliAsVector.begin(), "ARMNN_SL_OPTIONS");
+    // Convert the vector of strings to a vector of char* backed by the existing vector.
+    std::vector<char*> argVector;
+    for (const auto& arg : cliAsVector)
+    {
+        argVector.push_back((char*)arg.data());
+    }
+    // Terminate the array.
+    argVector.push_back(nullptr);
+    // Create usable variables.
+    int argc = argVector.size() - 1; // Ignore the null pointer at the end.
+    char** argv = argVector.data();
+
+    cxxopts::Options optionsDesc(argv[0], "Arm NN Support Library for the Android Neural Networks API."
+                                          "The support library will convert Android NNAPI requests "
+                                          "and delegate them to available ArmNN backends.");
+    try
+    {
+        optionsDesc.add_options()
+
+        ("a,enable-fast-math",
+                "Enables fast_math options in backends that support it. Using the fast_math flag can "
+               "lead to performance improvements but may result in reduced or different precision.",
+         cxxopts::value<bool>(m_FastMathEnabled)->default_value("false"))
+
+        ("c,compute","Comma separated list of backends to run layers on. "
+                "Examples of possible values are: CpuRef, CpuAcc, GpuAcc",
+         cxxopts::value<std::vector<std::string>>(backends))
+
+        ("d,request-inputs-and-outputs-dump-dir",
+         "If non-empty, the directory where request inputs and outputs should be dumped",
+         cxxopts::value<std::string>(m_RequestInputsAndOutputsDumpDir)->default_value(""))
+
+        ("f,fp16-enabled", "Enables support for relaxed computation from Float32 to Float16",
+         cxxopts::value<bool>(m_fp16Enabled)->default_value("false"))
+
+        ("h,help", "Show this help",
+         cxxopts::value<bool>(showHelp)->default_value("false")->implicit_value("true"))
+
+        ("m,cl-tuned-parameters-mode",
+         "If 'UseTunedParameters' (the default), will read CL tuned parameters from the file specified by "
+         "--cl-tuned-parameters-file. "
+         "If 'UpdateTunedParameters', will also find the optimum parameters when preparing new networks and update "
+         "the file accordingly.",
+         cxxopts::value<std::string>(clTunedParametersModeAsString)->default_value("UseTunedParameters"))
+
+        ("g,mlgo-cl-tuned-parameters-file",
+        "If non-empty, the given file will be used to load/save MLGO CL tuned parameters. ",
+        cxxopts::value<std::string>(m_ClMLGOTunedParametersFile)->default_value(""))
+
+        ("o,cl-tuning-level",
+         "exhaustive: all lws values are tested "
+         "normal: reduced number of lws values but enough to still have the performance really close to the "
+         "exhaustive approach "
+         "rapid: only 3 lws values should be tested for each kernel ",
+         cxxopts::value<std::string>(clTuningLevelAsString)->default_value("rapid"))
+
+        ("p,gpu-profiling", "Turns GPU profiling on",
+         cxxopts::value<bool>(m_EnableGpuProfiling)->default_value("false"))
+
+        ("q,cached-network-file", "If non-empty, the given file will be used to load/save cached network. "
+                                   "If save-cached-network option is given will save the cached network to given file."
+                                   "If save-cached-network option is not given will load the cached network from given "
+                                   "file.",
+        cxxopts::value<std::string>(m_CachedNetworkFilePath)->default_value(""))
+
+        ("s,save-cached-network",
+                "Enables saving the cached network to the file given with cached-network-file option."
+                " See also --cached-network-file",
+        cxxopts::value<bool>(m_SaveCachedNetwork)->default_value("false"))
+
+        ("number-of-threads",
+         "Assign the number of threads used by the CpuAcc backend. "
+         "Input value must be between 1 and 64. "
+         "Default is set to 0 (Backend will decide number of threads to use).",
+         cxxopts::value<unsigned int>(m_NumberOfThreads)->default_value("0"))
+
+        ("t,cl-tuned-parameters-file",
+         "If non-empty, the given file will be used to load/save CL tuned parameters. "
+         "See also --cl-tuned-parameters-mode",
+         cxxopts::value<std::string>(m_ClTunedParametersFile)->default_value(""))
+
+        ("u,unsupported-operations",
+         "If non-empty, a comma-separated list of operation indices which the driver will forcibly "
+         "consider unsupported",
+         cxxopts::value<std::string>(unsupportedOperationsAsString)->default_value(""))
+
+        ("v,verbose-logging", "Turns verbose logging on",
+         cxxopts::value<bool>(m_VerboseLogging)->default_value("false")->implicit_value("true"))
+
+        ("V,version", "Show version information",
+         cxxopts::value<bool>(showVersion)->default_value("false")->implicit_value("true"))
+        ;
+    }
+    catch (const std::exception& e)
+    {
+        VLOG(DRIVER) << "An error occurred attempting to construct options: " << e.what();
+        std::cout << "An error occurred attempting to construct options: %s" << std::endl;
+        m_ExitCode = EXIT_FAILURE;
+        return;
+    }
+
+    try
+    {
+        cxxopts::ParseResult result = optionsDesc.parse(argc, argv);
+    }
+    catch (const cxxopts::OptionException& e)
+    {
+        VLOG(DRIVER) << "An exception occurred attempting to parse program options: " << e.what();
+        std::cout << optionsDesc.help() << std::endl
+                  << "An exception occurred while parsing program options: " << std::endl
+                  << e.what() << std::endl;
+        m_ShouldExit = true;
+        m_ExitCode = EXIT_FAILURE;
+        return;
+    }
+    if (showHelp)
+    {
+        VLOG(DRIVER) << "Showing help and exiting";
+        std::cout << optionsDesc.help() << std::endl;
+        m_ShouldExit = true;
+        m_ExitCode = EXIT_SUCCESS;
+        return;
+    }
+    if (showVersion)
+    {
+        VLOG(DRIVER) << "Showing version and exiting";
+        std::cout << "ArmNN Android NN driver for the Android Neural Networks API.\n"
+                     "ArmNN v" << ARMNN_VERSION << std::endl;
+        m_ShouldExit = true;
+        m_ExitCode = EXIT_SUCCESS;
+        return;
+    }
+
+    // Convert the string backend names into backendId's.
+    m_Backends.reserve(backends.size());
+    for (auto&& backend : backends)
+    {
+        m_Backends.emplace_back(backend);
+    }
+
+    // If no backends have been specified we cannot continue; report the error and exit.
+    if (backends.empty())
+    {
+        VLOG(DRIVER) << "No backends have been specified:";
+        std::cout << optionsDesc.help() << std::endl
+                  << "Unable to start:" << std::endl
+                  << "No backends have been specified" << std::endl;
+        m_ShouldExit = true;
+        m_ExitCode = EXIT_FAILURE;
+        return;
+    }
+
+    if (!unsupportedOperationsAsString.empty())
+    {
+        std::istringstream argStream(unsupportedOperationsAsString);
+
+        std::string s;
+        while (!argStream.eof())
+        {
+            std::getline(argStream, s, ',');
+            try
+            {
+                unsigned int operationIdx = std::stoi(s);
+                m_ForcedUnsupportedOperations.insert(operationIdx);
+            }
+            catch (const std::invalid_argument&)
+            {
+                VLOG(DRIVER) << "Ignoring invalid integer argument in -u/--unsupported-operations value: " << s.c_str();
+            }
+        }
+    }
+
+    if (!m_ClTunedParametersFile.empty())
+    {
+        // The mode is only relevant if the file path has been provided
+        if (clTunedParametersModeAsString == "UseTunedParameters")
+        {
+            m_ClTunedParametersMode = armnn::IGpuAccTunedParameters::Mode::UseTunedParameters;
+        }
+        else if (clTunedParametersModeAsString == "UpdateTunedParameters")
+        {
+            m_ClTunedParametersMode = armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters;
+        }
+        else
+        {
+            VLOG(DRIVER) << "Requested unknown cl-tuned-parameters-mode "
+                          << clTunedParametersModeAsString.c_str() << ". Defaulting to UseTunedParameters";
+        }
+
+        if (clTuningLevelAsString == "exhaustive")
+        {
+            m_ClTuningLevel = armnn::IGpuAccTunedParameters::TuningLevel::Exhaustive;
+        }
+        else if (clTuningLevelAsString == "normal")
+        {
+            m_ClTuningLevel = armnn::IGpuAccTunedParameters::TuningLevel::Normal;
+        }
+        else if (clTuningLevelAsString == "rapid")
+        {
+            m_ClTuningLevel = armnn::IGpuAccTunedParameters::TuningLevel::Rapid;
+        }
+        else
+        {
+            VLOG(DRIVER) << "Requested unknown cl-tuner-mode '%s'. "
+                            "Defaulting to rapid" << clTuningLevelAsString.c_str();
+        }
+    }
+}
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/DriverOptions.hpp b/shim/sl/canonical/DriverOptions.hpp
new file mode 100644
index 0000000..4c6b385
--- /dev/null
+++ b/shim/sl/canonical/DriverOptions.hpp
@@ -0,0 +1,69 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+
+#include <set>
+#include <string>
+#include <vector>
+
+namespace armnn_driver
+{
+
+class DriverOptions
+{
+public:
+    DriverOptions(armnn::Compute computeDevice, bool fp16Enabled = false);
+    DriverOptions(const std::vector<armnn::BackendId>& backends, bool fp16Enabled = false);
+    DriverOptions();
+    DriverOptions(DriverOptions&& other) = default;
+
+    const std::vector<armnn::BackendId>& GetBackends() const { return m_Backends; }
+    bool IsVerboseLoggingEnabled() const { return m_VerboseLogging; }
+    const std::string& GetRequestInputsAndOutputsDumpDir() const { return m_RequestInputsAndOutputsDumpDir; }
+    const std::string& GetServiceName() const { return m_ServiceName; }
+    const std::set<unsigned int>& GetForcedUnsupportedOperations() const { return m_ForcedUnsupportedOperations; }
+    const std::string& GetClTunedParametersFile() const { return m_ClTunedParametersFile; }
+    const std::string& GetClMLGOTunedParametersFile() const { return m_ClMLGOTunedParametersFile; }
+    armnn::IGpuAccTunedParameters::Mode GetClTunedParametersMode() const { return m_ClTunedParametersMode; }
+    armnn::IGpuAccTunedParameters::TuningLevel GetClTuningLevel() const { return m_ClTuningLevel; }
+    bool IsGpuProfilingEnabled() const { return m_EnableGpuProfiling; }
+    bool IsFastMathEnabled() const { return m_FastMathEnabled; }
+    bool GetFp16Enabled() const { return m_fp16Enabled; }
+    void SetBackends(const std::vector<armnn::BackendId>& backends) { m_Backends = backends; }
+    bool ShouldExit() const { return m_ShouldExit; }
+    int GetExitCode() const { return m_ExitCode; }
+    const std::string& GetCachedNetworkFilePath() const { return m_CachedNetworkFilePath; }
+    bool SaveCachedNetwork() const { return m_SaveCachedNetwork; }
+    unsigned int GetNumberOfThreads() const { return m_NumberOfThreads; }
+    bool isAsyncModelExecutionEnabled() const { return m_EnableAsyncModelExecution; }
+    unsigned int getNoOfArmnnThreads() const { return m_ArmnnNumberOfThreads; }
+
+private:
+    std::vector<armnn::BackendId> m_Backends;
+    bool m_VerboseLogging;
+    std::string m_RequestInputsAndOutputsDumpDir;
+    std::string m_ServiceName;
+    std::set<unsigned int> m_ForcedUnsupportedOperations;
+    std::string m_ClTunedParametersFile;
+    std::string m_ClMLGOTunedParametersFile;
+    armnn::IGpuAccTunedParameters::Mode m_ClTunedParametersMode;
+    armnn::IGpuAccTunedParameters::TuningLevel m_ClTuningLevel;
+    bool m_EnableGpuProfiling;
+    bool m_fp16Enabled;
+    bool m_FastMathEnabled;
+    bool m_ShouldExit;
+    int m_ExitCode;
+    std::string m_CachedNetworkFilePath;
+    bool m_SaveCachedNetwork;
+    unsigned int m_NumberOfThreads;
+    bool m_EnableAsyncModelExecution;
+    unsigned int m_ArmnnNumberOfThreads;
+};
+
+} // namespace armnn_driver
diff --git a/shim/sl/canonical/ModelToINetworkTransformer.cpp b/shim/sl/canonical/ModelToINetworkTransformer.cpp
new file mode 100644
index 0000000..8efacaf
--- /dev/null
+++ b/shim/sl/canonical/ModelToINetworkTransformer.cpp
@@ -0,0 +1,202 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#define LOG_TAG "arm-armnn-sl"
+
+#include "ModelToINetworkTransformer.hpp"
+#include "CanonicalUtils.hpp"
+#include "Converter.hpp"
+
+#include <log/log.h>
+#include <type_traits>
+
+namespace armnn_driver
+{
+
+ModelToINetworkTransformer::ModelToINetworkTransformer(
+    const std::vector<armnn::BackendId>& backends,
+    const Model& model,
+    const std::set<unsigned int>& forcedUnsupportedOperations)
+    : m_Data(backends)
+    , m_Model(model)
+    , m_ForcedUnsupportedOperations(forcedUnsupportedOperations)
+    , m_ConversionResult(ConversionResult::Success)
+{
+    try
+    {
+        Convert();
+    }
+    catch (std::exception& e)
+    {
+        m_ConversionResult = ConversionResult::UnsupportedFeature;
+        VLOG(DRIVER) << "ModelToINetworkTransformer: Unexpected exception: " << e.what();
+        assert(false);
+    }
+}
+
+void ModelToINetworkTransformer::Convert()
+{
+    VLOG(DRIVER) << "ModelToINetworkTransformer: Convert()";
+    //VLOG(DRIVER) << "ModelToINetworkTransformer: Convert(): " << GetModelSummary(m_Model).c_str();
+
+    // map the memory pool into shared pointers
+    m_Data.m_MemPools.clear();
+    if (!setRunTimePoolInfosFromCanonicalMemories(&m_Data.m_MemPools, m_Model.pools))
+    {
+        VLOG(DRIVER) << "Setting of run time pool infos from Hidl Memories has failed." << __func__;
+        m_ConversionResult = ConversionResult::ErrorMappingPools;
+        return;
+    }
+
+    using NetworkOptions = std::vector<armnn::BackendOptions>;
+    NetworkOptions networkOptions;
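+    // "InferAndValidate" asks Arm NN to infer output shapes where the model leaves dimensions
+    // unspecified and to validate them against any shapes that are supplied.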
+    armnn::BackendOptions shapeInferenceMethodOption("ShapeInferenceMethod",
+                                                    {
+                                                            { "InferAndValidate", true }
+                                                    });
+
+    networkOptions.push_back(shapeInferenceMethodOption);
+
+    // Create armnn::INetwork
+    m_Data.m_Network = armnn::INetwork::Create(networkOptions);
+
+    // add operations to it
+    // track which layer outputs each operand
+    VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): m_OutputSlotForOperand";
+    m_Data.m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(m_Model.main.operands.size(), nullptr);
+    try
+    {
+        VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): for m_Model.inputIndexes.size()";
+        for (uint32_t i = 0; i < m_Model.main.inputIndexes.size(); i++)
+        {
+            VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): m_Model.inputIndexes[i]";
+            // inputs in android nn are represented by operands
+            uint32_t inputIndex = m_Model.main.inputIndexes[i];
+            VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): m_Model.operands[inputIndex]";
+            const Operand& operand = m_Model.main.operands[inputIndex];
+            VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): GetTensorInfoForOperand(operand)";
+
+            const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
+            const std::string layerName = "Input_" + std::to_string(i);
+            VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): m_Data.m_Network->AddInputLayer(...)";
+            armnn::IConnectableLayer* layer = m_Data.m_Network->AddInputLayer(i, layerName.c_str());
+
+            VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): layer->GetOutputSlot(0)";
+            armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
+            VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): outputSlot.SetTensorInfo(...)";
+            outputSlot.SetTensorInfo(GetTensorInfoForOperand(operand));
+
+            VLOG(DRIVER) << "ModelToINetworkTransformer::Convert(): store for later layers";
+            // store for later layers
+            m_Data.m_OutputSlotForOperand[inputIndex] = &outputSlot;
+        }
+    }
+    catch (UnsupportedOperand<OperandType>& e)
+    {
+        VLOG(DRIVER) <<  __func__ << "Operand type: " <<  e.m_type << " is not supported in ArmnnDriver";
+        m_ConversionResult = ConversionResult::UnsupportedFeature;
+    }
+    catch (const armnn::InvalidArgumentException& e)
+    {
+        Fail("%s: Failed to convert input operand to TensorShape: %s", __func__, e.what());
+        m_ConversionResult = ConversionResult::UnsupportedFeature;
+    }
+    bool UnsupportedDynamicOperation = false;
+    for (uint32_t operationIdx = 0; operationIdx < m_Model.main.operations.size(); operationIdx++)
+    {
+        const auto& operation = m_Model.main.operations[operationIdx];
+
+        bool ok = true;
+        if (m_ForcedUnsupportedOperations.find(operationIdx) != m_ForcedUnsupportedOperations.end())
+        {
+            Fail("%s: Operation at index %i has been forced to be unsupported.", __func__, operationIdx);
+            ok = false;
+        }
+
+        if (ok)
+        {
+            try
+            {
+                ok = Converter::ConvertOperation(operation, m_Model, m_Data);
+            }
+            catch (UnsupportedOperand<OperandType>& e)
+            {
+                VLOG(DRIVER) << __func__ << "Operation type: " << e.m_type << "is not supported in ArmnnDriver";
+                ok = false;
+            }
+            catch (const armnn::InvalidArgumentException& e)
+            {
+                Fail("%s: Failed to convert operation in %s", __func__, e.what());
+                ok = false;
+            }
+        }
+
+        // Store whether this operation was successfully converted.
+        m_OperationSupported.emplace(operationIdx, ok);
+
+        // Any single operation failing will fail the entire conversion.
+        // We still need to continue and check the other ones.
+        if (!ok)
+        {
+            if (m_Data.m_DynamicInputsEncountered)
+            {
+                Fail("%s: The unsupported operation at index %i has dynamic inputs.", __func__, operationIdx);
+                UnsupportedDynamicOperation = true;
+            }
+
+            m_ConversionResult = ConversionResult::UnsupportedFeature;
+        }
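+        // Reset the flag so it only reflects the operation handled in the next iteration of this loop.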
+        m_Data.m_DynamicInputsEncountered = false;
+    }
+
+    // Due to the NNAPI partitioner not supporting partition boundaries of unknown size,
+    // any operation whose outputs connect to an unsupported operation with dynamic inputs
+    // will cause a failure.
+
+    // The simplest solution to this problem is to not support any operations in a model containing
+    // an unsupported operation with dynamic inputs.
+    if (UnsupportedDynamicOperation)
+    {
+        Fail("%s: Unsupported operation with dynamic inputs found. Retroactively setting all operations to unsupported",
+             __func__);
+        for (auto& operation : m_OperationSupported)
+        {
+            operation.second = false;
+        }
+    }
+
+    try
+    {
+        if (m_ConversionResult == ConversionResult::Success)
+        {
+            for (uint32_t i = 0; i < m_Model.main.outputIndexes.size(); i++)
+            {
+                // outputs in android nn are represented by operands
+                uint32_t outputIndex = m_Model.main.outputIndexes[i];
+                const auto& operand = m_Model.main.operands[outputIndex];
+                const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
+                const std::string layerName = "Output_" + std::to_string(i);
+                armnn::IConnectableLayer* layer = m_Data.m_Network->AddOutputLayer(i, layerName.c_str());
+
+                assert(m_Data.m_OutputSlotForOperand[outputIndex]);
+                m_Data.m_OutputSlotForOperand[outputIndex]->Connect(layer->GetInputSlot(0));
+            }
+        }
+    }
+    catch (const armnn::InvalidArgumentException& e)
+    {
+        Fail("%s: Failed to convert output operand to TensorShape: %s", __func__, e.what());
+        m_ConversionResult = ConversionResult::UnsupportedFeature;
+    }
+}
+
+bool ModelToINetworkTransformer::IsOperationSupported(uint32_t operationIndex) const
+{
+    std::map<uint32_t, bool>::const_iterator it = m_OperationSupported.find(operationIndex);
+    assert(it != m_OperationSupported.end());
+    return it->second;
+}
+
+} // armnn_driver
diff --git a/shim/sl/canonical/ModelToINetworkTransformer.hpp b/shim/sl/canonical/ModelToINetworkTransformer.hpp
new file mode 100644
index 0000000..d38320c
--- /dev/null
+++ b/shim/sl/canonical/ModelToINetworkTransformer.hpp
@@ -0,0 +1,59 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+//#include "ArmnnDriver.hpp"
+#include "Converter.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <nnapi/IPreparedModel.h>
+#include <nnapi/Result.h>
+#include <nnapi/TypeUtils.h>
+#include <nnapi/Types.h>
+#include <nnapi/Validation.h>
+
+#include <set>
+#include <map>
+#include <vector>
+
+namespace armnn_driver
+{
+
+using namespace android::nn;
+
+// A helper class performing the conversion from an Android NN Model representation
+// to an armnn::INetwork object
+class ModelToINetworkTransformer
+{
+public:
+    ModelToINetworkTransformer(const std::vector<armnn::BackendId>& backends,
+                               const Model& model,
+                               const std::set<unsigned int>& forcedUnsupportedOperations);
+
+    ConversionResult GetConversionResult() const { return m_ConversionResult; }
+
+    // Returns the ArmNN INetwork corresponding to the input model, if preparation went smoothly, nullptr otherwise.
+    armnn::INetwork* GetINetwork() const { return m_Data.m_Network.get(); }
+
+    bool IsOperationSupported(uint32_t operationIndex) const;
+
+private:
+    void Convert();
+
+    // Shared aggregate input/output/internal data
+    ConversionData m_Data;
+
+    // Input data
+    const Model&                  m_Model;
+    const std::set<unsigned int>& m_ForcedUnsupportedOperations;
+
+    // Output data
+    ConversionResult         m_ConversionResult;
+    std::map<uint32_t, bool> m_OperationSupported;
+};
+
+} // armnn_driver
diff --git a/shim/sl/canonical/SystemPropertiesUtils.hpp b/shim/sl/canonical/SystemPropertiesUtils.hpp
new file mode 100644
index 0000000..0397925
--- /dev/null
+++ b/shim/sl/canonical/SystemPropertiesUtils.hpp
@@ -0,0 +1,84 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <stdio.h>
+#include <string>
+#include <iostream>
+#include <sstream>
+#include <sys/system_properties.h>
+#include <log/log.h>
+
+namespace {
+template<typename T>
+struct ConvStringTo;
+
+template<>
+struct ConvStringTo<float>
+{
+    static float Func(std::string s) { return std::stof(s); }
+};
+
+template<>
+struct ConvStringTo<int>
+{
+    static int Func(std::string s) { return std::stoi(s); }
+};
+
+template<>
+struct ConvStringTo<bool>
+{
+    static bool Func(std::string s) { return !!std::stoi(s); }
+};
+
+template<typename T>
+void GetCapabilitiesProperties([[maybe_unused]]void* cookie,
+                               [[maybe_unused]]const char *name,
+                               [[maybe_unused]]const char *value,
+                               [[maybe_unused]]uint32_t serial)
+{
+    T &prop = *reinterpret_cast<T*>(cookie);
+    prop = ConvStringTo<T>::Func(std::string(value));
+}
+
+template<typename T>
+T ParseSystemProperty(const char* name, T defaultValue)
+{
+    try
+    {
+        const prop_info *pInfo = __system_property_find(name);
+        if (!pInfo)
+        {
+            ALOGW("ArmnnDriver::ParseSystemProperty(): Could not find property [%s].", name);
+        } else
+        {
+            T property;
+            __system_property_read_callback(pInfo, &GetCapabilitiesProperties<T>, &property);
+            std::stringstream messageBuilder;
+            messageBuilder << "ArmnnDriver::ParseSystemProperty(): Setting [" << name << "]=[" << property << "].";
+            ALOGD("%s", messageBuilder.str().c_str());
+            return property;
+        }
+    }
+    catch(const std::invalid_argument& e)
+    {
+        ALOGD("ArmnnDriver::ParseSystemProperty(): Property [%s] has invalid data type.", name);
+    }
+    catch(const std::out_of_range& e)
+    {
+        ALOGD("ArmnnDriver::ParseSystemProperty(): Property [%s] out of range for the data type.", name);
+    }
+    catch (...)
+    {
+        ALOGD("ArmnnDriver::ParseSystemProperty(): Unexpected exception reading system "
+            "property [%s].", name);
+    }
+
+    std::stringstream messageBuilder;
+    messageBuilder << "ArmnnDriver::ParseSystemProperty(): Falling back to default value [" << defaultValue << "]";
+    ALOGD("%s", messageBuilder.str().c_str());
+    return defaultValue;
+}
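+
+// Example usage (hypothetical property name):
+//   const bool verbose = ParseSystemProperty("armnn.sl.verbose", false);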
+} //namespace
diff --git a/shim/sl/scripts/NeuralNetworks.patch b/shim/sl/scripts/NeuralNetworks.patch
new file mode 100644
index 0000000..81e859b
--- /dev/null
+++ b/shim/sl/scripts/NeuralNetworks.patch
@@ -0,0 +1,43 @@
+diff --git a/common/types/src/SharedMemoryAndroid.cpp b/common/types/src/SharedMemoryAndroid.cpp
+index c361a1eb6..3c09c5f4d 100644
+--- a/common/types/src/SharedMemoryAndroid.cpp
++++ b/common/types/src/SharedMemoryAndroid.cpp
+@@ -115,8 +115,23 @@ GeneralResult<SharedMemory> allocateSharedMemory(size_t size) {
+     return createSharedMemoryFromUniqueFd(size, prot, std::move(fd), offset);
+ }
+ 
+-GeneralResult<Mapping> map(const Memory::Ashmem& /*memory*/) {
+-    return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Cannot map ashmem memory";
++//GeneralResult<Mapping> map(const Memory::Ashmem& /*memory*/) {
++//    return NN_ERROR(ErrorStatus::INVALID_ARGUMENT) << "Cannot map ashmem memory";
++//}
++
++GeneralResult<Mapping> map(const Memory::Ashmem& memory) {
++    constexpr off64_t offset = 0;
++    constexpr int prot = PROT_READ | PROT_WRITE;
++    std::shared_ptr<base::MappedFile> mapping =
++            base::MappedFile::FromFd(memory.fd, offset, memory.size, prot);
++    if (mapping == nullptr || mapping->data() == nullptr) {
++        return NN_ERROR() << "Can't mmap the file descriptor.";
++    }
++    return Mapping{
++            .pointer = mapping->data(),
++            .size = memory.size,
++            .context = std::move(mapping),
++    };
+ }
+ 
+ #endif  // NN_COMPATIBILITY_LIBRARY_BUILD
+diff --git a/runtime/NeuralNetworks.cpp b/runtime/NeuralNetworks.cpp
+index 678888e9f..805a600bb 100644
+--- a/runtime/NeuralNetworks.cpp
++++ b/runtime/NeuralNetworks.cpp
+@@ -1927,7 +1927,7 @@ int SL_ANeuralNetworksDevice_forEachVendorExtensionOperandTypeInformation(
+ #define NNCL_FUNC(symbol) .symbol = symbol
+ 
+ NnApiSLDriverImplFL7 slDriverImpl{
+-        .base{.implFeatureLevel = ANEURALNETWORKS_FEATURE_LEVEL_7},
++        .base={.implFeatureLevel = ANEURALNETWORKS_FEATURE_LEVEL_7},
+         NNCL_FUNC(ANeuralNetworksBurst_create),
+         NNCL_FUNC(ANeuralNetworksBurst_free),
+         NNCL_FUNC(ANeuralNetworksCompilation_createForDevices),
diff --git a/shim/sl/scripts/clone_aosp_libs.sh b/shim/sl/scripts/clone_aosp_libs.sh
new file mode 100755
index 0000000..370126a
--- /dev/null
+++ b/shim/sl/scripts/clone_aosp_libs.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+
+#
+# Copyright © 2022 ARM Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+AOSP_WORKING_DIR=$1
+
+if [ "$#" -ne 1 ]; then
+    echo "Usage: This script must be passed a single parameter which is a path "
+    echo "       to an existing directory where the AOSP repo's will be cloned into."
+    echo "Error: No working directory path parameter provided."
+    exit 1
+fi
+if [ ! -d "$1" ]; then
+    echo "Usage: This script must be passed a single parameter which is a path "
+    echo "       to an existing directory where the AOSP repo's will be cloned into."
+    echo "Error: Working directory path provided is not a directory."
+    exit 1
+fi
+
+echo "AOSP_WORKING_DIR = $AOSP_WORKING_DIR"
+
+# NNAPI SUPPORT (SHAs for each repo taken from the master branch on 25/03/22)
+git clone https://android.googlesource.com/platform/packages/modules/NeuralNetworks/ "${AOSP_WORKING_DIR}/packages/modules/NeuralNetworks"
+pushd "${AOSP_WORKING_DIR}/packages/modules/NeuralNetworks"
+git checkout 9c2360318a35756addcd5d321a85f9270e0a04da
+popd
+
+git clone https://android.googlesource.com/platform/system/core "${AOSP_WORKING_DIR}/system/core/"
+pushd "${AOSP_WORKING_DIR}/system/core/"
+git checkout c408ee943a1d9c486e4fac10bee7f76a61c75bab
+popd
+
+git clone https://android.googlesource.com/platform/system/libbase "${AOSP_WORKING_DIR}/system/libbase"
+pushd "${AOSP_WORKING_DIR}/system/libbase"
+git checkout 2d235ac982044ea4985c39a834e2d85c6a8bca8f
+popd
+
+git clone https://android.googlesource.com/platform/system/libfmq "${AOSP_WORKING_DIR}/system/libfmq"
+pushd "${AOSP_WORKING_DIR}/system/libfmq"
+git checkout 331b20e54ddde93785d7688ebb0cdc1cbcf9fd9b
+popd
+
+git clone https://android.googlesource.com/platform/frameworks/native "${AOSP_WORKING_DIR}/frameworks/native"
+pushd "${AOSP_WORKING_DIR}/frameworks/native"
+git checkout fea6523ac18c9d4d40db04c996e833f60ff88489
+popd
+
+git clone https://android.googlesource.com/platform/system/logging "${AOSP_WORKING_DIR}/system/logging"
+pushd "${AOSP_WORKING_DIR}/system/logging"
+git checkout e1a669e529cf5a42cd8b331ca89634bb9dce5cae
+popd
+
+git clone https://android.googlesource.com/platform/external/boringssl "${AOSP_WORKING_DIR}/external/boringssl"
+pushd "${AOSP_WORKING_DIR}/external/boringssl"
+git checkout ebeca38b4ecbe81fdf1d127ef7abb4689722308c
+popd
+
+git clone https://android.googlesource.com/platform/external/tensorflow "${AOSP_WORKING_DIR}/external/tensorflow"
+pushd "${AOSP_WORKING_DIR}/external/tensorflow"
+git checkout a6772d90a9b542ceb50f35f67e1cebf322d8b0d0
+popd
+
+git clone https://android.googlesource.com/platform/external/eigen "${AOSP_WORKING_DIR}/external/eigen"
+pushd "${AOSP_WORKING_DIR}/external/eigen"
+git checkout 10f298fc4175c1b8537c674f654a070c871960e5
+popd
+
+git clone https://android.googlesource.com/platform/external/ruy "${AOSP_WORKING_DIR}/external/ruy"
+pushd "${AOSP_WORKING_DIR}/external/ruy"
+git checkout 4377b97cf0850e0a61caa191586ebe68ccbc2abf
+popd
+
+git clone https://android.googlesource.com/platform/external/gemmlowp "${AOSP_WORKING_DIR}/external/gemmlowp"
+pushd "${AOSP_WORKING_DIR}/external/gemmlowp"
+git checkout 689c69e88b91e7bff068e33396f74c0a5b17390e
+popd
+
+git clone https://android.googlesource.com/platform/prebuilts/vndk/v29 "${AOSP_WORKING_DIR}/prebuilts/vndk/v29"
+pushd "${AOSP_WORKING_DIR}/prebuilts/vndk/v29"
+git checkout 5a73511dd91512681df643ce604d36763cd81b0e
+popd
diff --git a/shim/sl/scripts/libbase_logging_cpp.patch b/shim/sl/scripts/libbase_logging_cpp.patch
new file mode 100644
index 0000000..ecf3e9c
--- /dev/null
+++ b/shim/sl/scripts/libbase_logging_cpp.patch
@@ -0,0 +1,171 @@
+diff --git a/logging.cpp b/logging.cpp
+index 4942e2f..1ff0996 100644
+--- a/logging.cpp
++++ b/logging.cpp
+@@ -209,9 +209,9 @@ static std::recursive_mutex& TagLock() {
+ static std::string* gDefaultTag;
+ 
+ void SetDefaultTag(const std::string& tag) {
+-  if (__builtin_available(android 30, *)) {
+-    __android_log_set_default_tag(tag.c_str());
+-  } else {
++//  if (__builtin_available(android 30, *)) {
++//    __android_log_set_default_tag(tag.c_str());
++//  } else {
+     std::lock_guard<std::recursive_mutex> lock(TagLock());
+     if (gDefaultTag != nullptr) {
+       delete gDefaultTag;
+@@ -220,7 +220,7 @@ void SetDefaultTag(const std::string& tag) {
+     if (!tag.empty()) {
+       gDefaultTag = new std::string(tag);
+     }
+-  }
++//  }
+ }
+ 
+ static bool gInitialized = false;
+@@ -314,13 +314,13 @@ static void LogdLogChunk(LogId id, LogSeverity severity, const char* tag, const
+   int32_t lg_id = LogIdTolog_id_t(id);
+   int32_t priority = LogSeverityToPriority(severity);
+ 
+-  if (__builtin_available(android 30, *)) {
+-    __android_log_message log_message = {sizeof(__android_log_message),     lg_id, priority, tag,
+-                                         static_cast<const char*>(nullptr), 0,     message};
+-    __android_log_logd_logger(&log_message);
+-  } else {
++//  if (__builtin_available(android 30, *)) {
++//    __android_log_message log_message = {sizeof(__android_log_message),     lg_id, priority, tag,
++//                                         static_cast<const char*>(nullptr), 0,     message};
++//    __android_log_logd_logger(&log_message);
++//  } else {
+     __android_log_buf_print(lg_id, priority, tag, "%s", message);
+-  }
++//  }
+ }
+ 
+ LogdLogger::LogdLogger(LogId default_log_id) : default_log_id_(default_log_id) {}
+@@ -396,15 +396,15 @@ LogFunction SetLogger(LogFunction&& logger) {
+   LogFunction old_logger = std::move(Logger());
+   Logger() = std::move(logger);
+ 
+-  if (__builtin_available(android 30, *)) {
+-    __android_log_set_logger([](const struct __android_log_message* log_message) {
+-      auto log_id = log_id_tToLogId(log_message->buffer_id);
+-      auto severity = PriorityToLogSeverity(log_message->priority);
+-
+-      Logger()(log_id, severity, log_message->tag, log_message->file, log_message->line,
+-               log_message->message);
+-    });
+-  }
++//  if (__builtin_available(android 30, *)) {
++//    __android_log_set_logger([](const struct __android_log_message* log_message) {
++//      auto log_id = log_id_tToLogId(log_message->buffer_id);
++//      auto severity = PriorityToLogSeverity(log_message->priority);
++//
++//      Logger()(log_id, severity, log_message->tag, log_message->file, log_message->line,
++//               log_message->message);
++//    });
++//  }
+   return old_logger;
+ }
+ 
+@@ -412,9 +412,9 @@ AbortFunction SetAborter(AbortFunction&& aborter) {
+   AbortFunction old_aborter = std::move(Aborter());
+   Aborter() = std::move(aborter);
+ 
+-  if (__builtin_available(android 30, *)) {
+-    __android_log_set_aborter([](const char* abort_message) { Aborter()(abort_message); });
+-  }
++//  if (__builtin_available(android 30, *)) {
++//    __android_log_set_aborter([](const char* abort_message) { Aborter()(abort_message); });
++//  }
+   return old_aborter;
+ }
+ 
+@@ -500,11 +500,11 @@ LogMessage::~LogMessage() {
+ 
+   // Abort if necessary.
+   if (data_->GetSeverity() == FATAL) {
+-    if (__builtin_available(android 30, *)) {
+-      __android_log_call_aborter(msg.c_str());
+-    } else {
++//    if (__builtin_available(android 30, *)) {
++//      __android_log_call_aborter(msg.c_str());
++//    } else {
+       Aborter()(msg.c_str());
+-    }
++//    }
+   }
+ }
+ 
+@@ -515,11 +515,11 @@ std::ostream& LogMessage::stream() {
+ void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity severity, const char* tag,
+                          const char* message) {
+   int32_t priority = LogSeverityToPriority(severity);
+-  if (__builtin_available(android 30, *)) {
+-    __android_log_message log_message = {
+-        sizeof(__android_log_message), LOG_ID_DEFAULT, priority, tag, file, line, message};
+-    __android_log_write_log_message(&log_message);
+-  } else {
++//  if (__builtin_available(android 30, *)) {
++//    __android_log_message log_message = {
++//        sizeof(__android_log_message), LOG_ID_DEFAULT, priority, tag, file, line, message};
++//    __android_log_write_log_message(&log_message);
++//  } else {
+     if (tag == nullptr) {
+       std::lock_guard<std::recursive_mutex> lock(TagLock());
+       if (gDefaultTag == nullptr) {
+@@ -530,38 +530,38 @@ void LogMessage::LogLine(const char* file, unsigned int line, LogSeverity severi
+     } else {
+       Logger()(DEFAULT, severity, tag, file, line, message);
+     }
+-  }
++//  }
+ }
+ 
+ LogSeverity GetMinimumLogSeverity() {
+-  if (__builtin_available(android 30, *)) {
+-    return PriorityToLogSeverity(__android_log_get_minimum_priority());
+-  } else {
++//  if (__builtin_available(android 30, *)) {
++//    return PriorityToLogSeverity(__android_log_get_minimum_priority());
++//  } else {
+     return gMinimumLogSeverity;
+-  }
++//  }
+ }
+ 
+ bool ShouldLog(LogSeverity severity, const char* tag) {
+   // Even though we're not using the R liblog functions in this function, if we're running on Q,
+   // we need to fall back to using gMinimumLogSeverity, since __android_log_is_loggable() will not
+   // take into consideration the value from SetMinimumLogSeverity().
+-  if (__builtin_available(android 30, *)) {
+-    int32_t priority = LogSeverityToPriority(severity);
+-    return __android_log_is_loggable(priority, tag, ANDROID_LOG_INFO);
+-  } else {
++//  if (__builtin_available(android 30, *)) {
++//    int32_t priority = LogSeverityToPriority(severity);
++//    return __android_log_is_loggable(priority, tag, ANDROID_LOG_INFO);
++//  } else {
+     return severity >= gMinimumLogSeverity;
+-  }
++//  }
+ }
+ 
+ LogSeverity SetMinimumLogSeverity(LogSeverity new_severity) {
+-  if (__builtin_available(android 30, *)) {
+-    int32_t priority = LogSeverityToPriority(new_severity);
+-    return PriorityToLogSeverity(__android_log_set_minimum_priority(priority));
+-  } else {
++//  if (__builtin_available(android 30, *)) {
++//    int32_t priority = LogSeverityToPriority(new_severity);
++//    return PriorityToLogSeverity(__android_log_set_minimum_priority(priority));
++//  } else {
+     LogSeverity old_severity = gMinimumLogSeverity;
+     gMinimumLogSeverity = new_severity;
+     return old_severity;
+-  }
++//  }
+ }
+ 
+ ScopedLogSeverity::ScopedLogSeverity(LogSeverity new_severity) {
diff --git a/shim/sl/scripts/modify_aosp_libs.sh b/shim/sl/scripts/modify_aosp_libs.sh
new file mode 100755
index 0000000..c13976b
--- /dev/null
+++ b/shim/sl/scripts/modify_aosp_libs.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+#
+# Copyright © 2022 ARM Ltd and Contributors. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+AOSP_WORKING_DIR=$1
+
+if [ "$#" -ne 1 ]; then
+
+    echo "Usage: This script must be passed a single parameter which is a path "
+    echo "       to an existing directory where the AOSP repo's have been cloned."
+    echo "Error: No working directory path parameter provided."
+    exit 1
+fi
+if [ ! -d "$1" ]; then
+
+    echo "Usage: This script must be passed a single parameter which is a path "
+    echo "       to an existing directory where the AOSP repo's have been cloned."
+    echo "Error: Working directory path provided is not a directory."
+    exit 1
+fi
+
+SCRIPT_PATH=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+echo "SCRIPT_PATH= ${SCRIPT_PATH}"
+
+pushd "${AOSP_WORKING_DIR}/system/libbase/"
+  echo "Applying libbase logging.cpp patch"
+  git apply "${SCRIPT_PATH}/libbase_logging_cpp.patch"
+popd
+
+pushd "${AOSP_WORKING_DIR}/packages/modules/NeuralNetworks/"
+  echo "Applying NeuralNetworks patch"
+  git apply "${SCRIPT_PATH}/NeuralNetworks.patch"
+popd
diff --git a/shim/sl/support_library_service.cpp b/shim/sl/support_library_service.cpp
new file mode 100644
index 0000000..14556fd
--- /dev/null
+++ b/shim/sl/support_library_service.cpp
@@ -0,0 +1,18 @@
+//
+// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "canonical/ArmnnDriver.hpp"
+
+#include <nnapi/IDevice.h>
+
+namespace android::nn
+{
+
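+// Returns the devices exposed by this support library: a single ArmnnDriver instance
+// configured from the default DriverOptions (which parses ARMNN_SL_OPTIONS).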
+std::vector<SharedDevice> getDevices()
+{
+    return { std::make_shared<armnn_driver::ArmnnDriver>(DriverOptions()) };
+}
+
+}  // namespace android::nn
diff --git a/src/armnnSerializer/CMakeLists.txt b/src/armnnSerializer/CMakeLists.txt
index 1239b03..919a68b 100755
--- a/src/armnnSerializer/CMakeLists.txt
+++ b/src/armnnSerializer/CMakeLists.txt
@@ -33,26 +33,39 @@
     if(BUILD_BARE_METAL)
         add_library_ex(armnnSerializer STATIC ${armnn_serializer_sources})
     else()
+        # We're going to export both an OBJECT library and a SHARED library here.
+        # In some instances it's easier to include the serializer directly into
+        # the target executable or library rather than have yet another .so.
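+        # For example, a consumer can pull the objects straight into its own target
+        # (hypothetical target name) with:
+        #   target_sources(myTarget PRIVATE $<TARGET_OBJECTS:armnnSerializerObj>)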
+        add_library(armnnSerializerObj OBJECT ${armnn_serializer_sources})
         add_library_ex(armnnSerializer SHARED ${armnn_serializer_sources})
     endif()
 
     include_directories(SYSTEM "${FLATBUFFERS_INCLUDE_PATH}")
 
     set_target_properties(armnnSerializer PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR})
-
+    target_include_directories(armnnSerializerObj PRIVATE ../armnn)
+    target_include_directories(armnnSerializerObj PRIVATE ../armnnUtils)
     target_include_directories(armnnSerializer PRIVATE ../armnn)
     target_include_directories(armnnSerializer PRIVATE ../armnnUtils)
     target_include_directories(armnnSerializer PRIVATE ../../generated)
+    target_include_directories(armnnSerializerObj PRIVATE ../../generated)
 
     list(APPEND armnn_serializer_sources
         ArmnnSchema_generated.h
         )
 
     # System include to suppress warnings for flatbuffers generated files
+    target_include_directories(armnnSerializerObj SYSTEM PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
     target_include_directories(armnnSerializer SYSTEM PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
 
     target_link_libraries(armnnSerializer armnn ${FLATBUFFERS_LIBRARY})
 
+    install(TARGETS armnnSerializerObj
+            EXPORT  armnn-targets
+            LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+            ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+    )
+
     install(TARGETS armnnSerializer
             EXPORT  armnn-targets
             LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}