IVGCVSW-1946: Remove armnn/src from the include paths

Change-Id: I663a0a0fccb43ee960ec070121a59df9db0bb04e
diff --git a/src/backends/backendsCommon/test/ActivationFixture.hpp b/src/backends/backendsCommon/test/ActivationFixture.hpp
new file mode 100644
index 0000000..8ff77f6
--- /dev/null
+++ b/src/backends/backendsCommon/test/ActivationFixture.hpp
@@ -0,0 +1,61 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "TensorCopyUtils.hpp"
+#include "WorkloadTestUtils.hpp"
+
+#include <test/TensorHelpers.hpp>
+
+#include <boost/numeric/conversion/cast.hpp>
+#include <boost/multi_array.hpp>
+
+struct ActivationFixture
+{
+    ActivationFixture()
+    {
+        auto boostArrayExtents = boost::extents
+            [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
+            [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
+            [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
+            [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
+        output.resize(boostArrayExtents);
+        outputExpected.resize(boostArrayExtents);
+        input.resize(boostArrayExtents);
+
+        unsigned int inputShape[]  = { batchSize, channels, height, width };
+        unsigned int outputShape[] = { batchSize, channels, height, width };
+
+        inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+        outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
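+        // MakeRandomTensor takes a seed (here 21453) so the generated input is deterministic.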
+        input = MakeRandomTensor<float, 4>(inputTensorInfo, 21453);
+    }
+
+    unsigned int width     = 17;
+    unsigned int height    = 29;
+    unsigned int channels  = 2;
+    unsigned int batchSize = 5;
+
+    boost::multi_array<float, 4> output;
+    boost::multi_array<float, 4> outputExpected;
+    boost::multi_array<float, 4> input;
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    // Parameters used by some of the activation functions.
+    float a = 0.234f;
+    float b = -12.345f;
+};
+
+
+struct PositiveActivationFixture : public ActivationFixture
+{
+    PositiveActivationFixture()
+    {
+        input = MakeRandomTensor<float, 4>(inputTensorInfo, 2342423, 0.0f, 1.0f);
+    }
+};
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/ActivationTestImpl.hpp b/src/backends/backendsCommon/test/ActivationTestImpl.hpp
new file mode 100644
index 0000000..3b3ee93
--- /dev/null
+++ b/src/backends/backendsCommon/test/ActivationTestImpl.hpp
@@ -0,0 +1,560 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "ActivationFixture.hpp"
+#include "QuantizeHelper.hpp"
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+#include <algorithm>
+
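+// Runs a BoundedReLu activation workload through the given factory and compares the produced
+// output against outputExpectedData. With m_A = upperBound and m_B = lowerBound, BoundedReLu
+// clamps each element x to min(upperBound, max(lowerBound, x)).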
+template<typename T>
+LayerTestResult<T, 4> BoundedReLuTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                        float upperBound, float lowerBound,
+                                        float inputScale, int32_t inputOffset, float outputScale, int32_t outputOffset,
+                                        const std::vector<T>& inputData, const std::vector<T>& outputExpectedData,
+                                        unsigned int inputWidth, unsigned int inputHeight,
+                                        unsigned int inputChannels, unsigned int inputBatchSize)
+{
+    unsigned int outputWidth = inputWidth;
+    unsigned int outputHeight = inputHeight;
+    unsigned int outputChannels = inputChannels;
+    unsigned int outputBatchSize = inputBatchSize;
+
+    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
+        armnn::GetDataType<T>());
+
+    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
+        armnn::GetDataType<T>());
+
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(inputScale);
+        inputTensorInfo.SetQuantizationOffset(inputOffset);
+
+        outputTensorInfo.SetQuantizationScale(outputScale);
+        outputTensorInfo.SetQuantizationOffset(outputOffset);
+    }
+
+    LayerTestResult<T, 4> result(inputTensorInfo);
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    // Set up bounded ReLu.
+    armnn::ActivationQueueDescriptor descriptor;
+    armnn::WorkloadInfo workloadInfo;
+    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
+
+    descriptor.m_Parameters.m_Function = armnn::ActivationFunction::BoundedReLu;
+    descriptor.m_Parameters.m_A = upperBound;
+    descriptor.m_Parameters.m_B = lowerBound;
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
+
+    return result;
+}
+
+LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int inputWidth = 4u;
+    unsigned int inputHeight = 5u;
+    unsigned int inputChannels = 1u;
+    unsigned int inputBatchSize = 1;
+
+    std::vector<float> input = std::vector<float>{
+      -2.0f,       0.1f,     0.5f,     1.25f,
+     0.786f,    0.9875f,    -1.5f,    0.384f,
+    1.0001f,       3.5f,     7.5f,    0.896f,
+     2.126f,       2.0f,     0.3f,     0.15f,
+     0.999f,       1.2f,    0.89f,      6.1f,
+    };
+
+    // Calculated manually.
+    std::vector<float> output = std::vector<float>{
+      -1.0f,       0.1f,     0.5f,      1.0f,
+     0.786f,    0.9875f,    -1.0f,    0.384f,
+       1.0f,       1.0f,     1.0f,    0.896f,
+       1.0f,       1.0f,     0.3f,     0.15f,
+     0.999f,       1.0f,    0.89f,      1.0f,
+    };
+
+    return BoundedReLuTestCommon(workloadFactory, 1.0f, -1.0f, 1.0f, 0, 1.0f, 0, input, output,
+                                 inputWidth, inputHeight, inputChannels, inputBatchSize);
+}
+
+LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int inputWidth = 4u;
+    unsigned int inputHeight = 5u;
+    unsigned int inputChannels = 1u;
+    unsigned int inputBatchSize = 1;
+
+    std::vector<float> input = std::vector<float>{
+      -1.0f,       0.1f,     0.5f,      6.25f,
+     0.786f,    5.9875f,    -0.5f,     0.384f,
+    6.0001f,       3.5f,     7.5f,     0.896f,
+     2.126f,      12.0f,     0.3f,      0.15f,
+     0.999f,       1.2f,    0.89f,       6.1f,
+    };
+
+    // Calculated manually.
+    std::vector<float> output = std::vector<float>{
+       0.0f,       0.1f,     0.5f,       6.0f,
+     0.786f,    5.9875f,     0.0f,     0.384f,
+       6.0f,       3.5f,     6.0f,     0.896f,
+     2.126f,       6.0f,     0.3f,      0.15f,
+     0.999f,       1.2f,    0.89f,       6.0f,
+    };
+
+    return BoundedReLuTestCommon(workloadFactory, 6.0f, 0.0f, 1.0f, 0, 1.0f, 0, input, output,
+                                 inputWidth, inputHeight, inputChannels, inputBatchSize);
+}
+
+LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int inputWidth     = 3u;
+    unsigned int inputHeight    = 2u;
+    unsigned int inputChannels  = 1u;
+    unsigned int inputBatchSize = 1;
+
+    std::vector<uint8_t> input = std::vector<uint8_t>{
+         51, 124, 28,
+        251,   8, 92
+    };
+
+    // Calculated manually.
+    std::vector<uint8_t> output = std::vector<uint8_t>{
+          0, 122,  0,
+        255,   0, 58
+    };
+
+    float inputScale     = 12.0f / 255.0f;
+    int32_t inputOffset  = 63;
+    float outputScale    = 6.0f / 255.0f;
+    int32_t outputOffset = 0;
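+    // A quantized value q represents the real value scale * (q - offset); for example the input
+    // value 124 dequantizes to (124 - 63) * 12/255 ≈ 2.87, which re-quantizes to 122 in the output.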
+
+    return BoundedReLuTestCommon(workloadFactory, 6.0f, 0.0f,
+                                 inputScale, inputOffset, outputScale, outputOffset,
+                                 input, output,
+                                 inputWidth, inputHeight, inputChannels, inputBatchSize);
+}
+
+LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int inputWidth     = 3u;
+    unsigned int inputHeight    = 2u;
+    unsigned int inputChannels  = 1u;
+    unsigned int inputBatchSize = 1;
+
+    std::vector<uint8_t> input = std::vector<uint8_t>{
+         51, 230, 28,
+        251,   8, 92
+    };
+
+    // Calculated manually.
+    std::vector<uint8_t> output = std::vector<uint8_t>{
+         51, 192, 32,
+        192,  32, 92
+    };
+
+    int32_t inputOffset = 112;
+    float inputScale    = 0.0125f;
+
+    return BoundedReLuTestCommon(workloadFactory, 1.0f, -1.0f,
+                                 inputScale, inputOffset, inputScale, inputOffset, // Input/output scale & offset same.
+                                 input, output,
+                                 inputWidth, inputHeight, inputChannels, inputBatchSize);
+}
+
+namespace
+{
+
+struct BoundedReLuRandomInputTestTraits
+{
+    constexpr static unsigned int inputHeight = 31u;
+    constexpr static unsigned int inputWidth = 19u;
+    constexpr static unsigned int inputChannels = 4u;
+    constexpr static unsigned int inputBatchSize = 2;
+
+    constexpr static unsigned int outputHeight = inputHeight;
+    constexpr static unsigned int outputWidth = inputWidth;
+    constexpr static unsigned int outputChannels = inputChannels;
+    constexpr static unsigned int outputBatchSize = inputBatchSize;
+
+    static armnn::TensorInfo GetInputTensorInfo()
+    {
+        return armnn::TensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
+            armnn::DataType::Float32);
+    }
+
+    static armnn::TensorInfo GetOutputTensorInfo()
+    {
+        return armnn::TensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
+            armnn::DataType::Float32);
+    }
+};
+
+boost::multi_array<float, 4> BoundedReLuRandomInputTest(armnn::IWorkloadFactory& workloadFactory,
+                                                        float lowerBound,
+                                                        float upperBound,
+                                                        const armnn::ActivationDescriptor& activationDescriptor)
+{
+    const armnn::TensorInfo inputTensorInfo = BoundedReLuRandomInputTestTraits::GetInputTensorInfo();
+    const armnn::TensorInfo outputTensorInfo = BoundedReLuRandomInputTestTraits::GetOutputTensorInfo();
+
+    boost::multi_array<float, 4> output(GetTensorShapeAsArray<4>(outputTensorInfo));
+
+    // Min/max random values passed to MakeRandomTensor are purposely outside of the ReLu
+    // range [lowerBound, upperBound].
+    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 4605828, lowerBound - 5.0f, upperBound * 2.0f);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    // Set up bounded ReLu.
+    armnn::ActivationQueueDescriptor descriptor;
+    armnn::WorkloadInfo workloadInfo;
+    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
+    descriptor.m_Parameters = activationDescriptor;
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&output[0][0][0][0], outputHandle.get());
+
+    return output;
+}
+
+} // namespace
+
+LayerTestResult<float, 4> CompareBoundedReLuTest(armnn::IWorkloadFactory& workloadFactory,
+                                          armnn::IWorkloadFactory& otherWorkloadFactory,
+                                          float upperBound,
+                                          float lowerBound)
+{
+    LayerTestResult<float, 4> result(BoundedReLuRandomInputTestTraits::GetOutputTensorInfo());
+
+    armnn::ActivationDescriptor activationDescriptor;
+    activationDescriptor.m_Function = armnn::ActivationFunction::BoundedReLu;
+    activationDescriptor.m_A = upperBound;
+    activationDescriptor.m_B = lowerBound;
+
+    result.output = BoundedReLuRandomInputTest(workloadFactory, 0.0f, upperBound, activationDescriptor);
+    result.outputExpected = BoundedReLuRandomInputTest(otherWorkloadFactory, 0.0f, upperBound, activationDescriptor);
+
+    return result;
+}
+
+template<typename T>
+LayerTestResult<T,4> ConstantLinearActivationTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                        float qScale = 0.0f,
+                                                        int32_t qOffset = 0)
+{
+    unsigned int inputHeight    = 20;
+    unsigned int inputWidth     = 17;
+    unsigned int inputChannels  = 3;
+    unsigned int batchSize      = 5;
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int shape[]  = {batchSize, inputChannels, inputHeight, inputWidth};
+
+    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>());
+    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    // Do a linear activation that should leave the tensor unchanged.
+    armnn::ActivationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Parameters.m_A = 1.0f;
+    data.m_Parameters.m_B = 0.0f;
+    data.m_Parameters.m_Function = armnn::ActivationFunction::Linear;
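+    // Linear activation computes a*x + b, so a = 1 and b = 0 leaves each element unchanged.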
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 7123561);
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    // Ensure output equals input.
+    ret.outputExpected = input;
+
+    return ret;
+}
+
+LayerTestResult<float, 4> ConstantLinearActivationTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return ConstantLinearActivationTestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return ConstantLinearActivationTestCommon<uint8_t>(workloadFactory, 4.0f, 3);
+}
+
+template<typename T>
+LayerTestResult<T, 4> SimpleActivationTest(armnn::IWorkloadFactory& workloadFactory,
+                                           armnn::ActivationFunction activationFunction,
+                                           float activationParameterA,
+                                           float activationParameterB,
+                                           float qScale,
+                                           int32_t qOffset,
+                                           const std::vector<float>& inputData,
+                                           const std::vector<float>& outputExpectedData)
+{
+    constexpr static unsigned int inputWidth = 16u;
+    constexpr static unsigned int inputHeight = 1u;
+    constexpr static unsigned int inputChannels = 1u;
+    constexpr static unsigned int inputBatchSize = 1u;
+
+    constexpr static unsigned int outputWidth = inputWidth;
+    constexpr static unsigned int outputHeight = inputHeight;
+    constexpr static unsigned int outputChannels = inputChannels;
+    constexpr static unsigned int outputBatchSize = inputBatchSize;
+
+    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
+                                      armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
+                                       armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    LayerTestResult<T, 4> result(inputTensorInfo);
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    // Set up the activation descriptor.
+    armnn::ActivationQueueDescriptor descriptor;
+    armnn::WorkloadInfo workloadInfo;
+    AddInputToWorkload(descriptor, workloadInfo, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, workloadInfo, outputTensorInfo, outputHandle.get());
+
+    descriptor.m_Parameters.m_Function = activationFunction;
+    descriptor.m_Parameters.m_A = activationParameterA;
+    descriptor.m_Parameters.m_B = activationParameterB;
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(descriptor, workloadInfo);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    // Calculated manually.
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, outputExpectedData));
+
+    return result;
+}
+
+template<typename T>
+LayerTestResult<T, 4> SimpleSigmoidTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
+{
+    std::vector<float> inputData = {
+        -0.1f, -0.2f, -0.3f, -0.4f,
+        0.1f,  0.2f,  0.3f,  0.4f,
+        -1.0f, -2.0f, -3.0f, -4.0f,
+        1.0f,  2.0f,  3.0f,  4.0f
+    };
+
+    // Calculate output values for input.
+    auto f = [](float value)
+    {
+        return 1.0f / (1.0f + std::exp(-value));
+    };
+    std::vector<float> outputExpectedData(inputData.size());
+    std::transform(inputData.begin(), inputData.end(), outputExpectedData.begin(), f);
+
+    return SimpleActivationTest<T>(workloadFactory,
+                                   armnn::ActivationFunction::Sigmoid,
+                                   0.f,
+                                   0.f,
+                                   qScale,
+                                   qOffset,
+                                   inputData,
+                                   outputExpectedData);
+}
+
+LayerTestResult<float, 4> SimpleSigmoidTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return SimpleSigmoidTestCommon<float>(workloadFactory, 0.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return SimpleSigmoidTestCommon<uint8_t>(workloadFactory, 0.1f, 50);
+}
+
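+// Runs the same activation workload on the factory under test and on a reference factory with
+// identical random input, returning both outputs so the caller can compare them.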
+template<typename T>
+LayerTestResult<T,4> CompareActivationTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                               armnn::IWorkloadFactory& refWorkloadFactory,
+                                               armnn::ActivationFunction f,
+                                               unsigned int batchSize = 5,
+                                               float qScale = 0.0f,
+                                               int32_t qOffset = 0)
+{
+    unsigned int width     = 17;
+    unsigned int height    = 29;
+    unsigned int channels  = 2;
+
+    float a = 0.234f;
+    float b = -12.345f;
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int shape[] = {batchSize, channels, height, width};
+
+    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>());
+    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    float minVal = -10.f;
+    if (f == armnn::ActivationFunction::Sqrt)
+    {
+        minVal = 0.f;
+    }
+
+    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 21453, minVal, 10.f);
+
+
+    LayerTestResult<T,4> ret(outputTensorInfo);
+    auto boostArrayExtents = boost::extents
+        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(batchSize)]
+        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(channels)]
+        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(height)]
+        [boost::numeric_cast<boost::multi_array_types::extent_gen::index>(width)];
+    ret.output.resize(boostArrayExtents);
+    ret.outputExpected.resize(boostArrayExtents);
+
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ActivationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Parameters.m_A        = a;
+    data.m_Parameters.m_B        = b;
+    data.m_Parameters.m_Function = f;
+
+    armnn::ActivationQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
+    BOOST_ASSERT(workload != nullptr);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
+    BOOST_ASSERT(workloadRef != nullptr);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    inputHandleRef->Allocate();
+    outputHandleRef->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+    workloadRef->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+
+    return ret;
+}
+
+LayerTestResult<float,4> CompareActivationTest(armnn::IWorkloadFactory& workloadFactory,
+                                               armnn::IWorkloadFactory& refWorkloadFactory,
+                                               armnn::ActivationFunction f,
+                                               unsigned int batchSize)
+{
+    return CompareActivationTestImpl<float>(workloadFactory, refWorkloadFactory, f, batchSize);
+}
+
+LayerTestResult<uint8_t,4> CompareActivationUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                      armnn::IWorkloadFactory& refWorkloadFactory,
+                                                      armnn::ActivationFunction f)
+{
+    return CompareActivationTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, f, 5, 0.1f, 50);
+}
diff --git a/src/backends/backendsCommon/test/BackendIdTests.cpp b/src/backends/backendsCommon/test/BackendIdTests.cpp
new file mode 100644
index 0000000..e11c13e
--- /dev/null
+++ b/src/backends/backendsCommon/test/BackendIdTests.cpp
@@ -0,0 +1,28 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/BackendId.hpp>
+#include <armnn/Types.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+using namespace armnn;
+
+BOOST_AUTO_TEST_SUITE(BackendIdTests)
+
+BOOST_AUTO_TEST_CASE(CreateBackendIdFromCompute)
+{
+    BackendId fromCompute{Compute::GpuAcc};
+    BOOST_TEST(fromCompute.Get() == GetComputeDeviceAsCString(Compute::GpuAcc));
+}
+
+BOOST_AUTO_TEST_CASE(CreateBackendIdVectorFromCompute)
+{
+    std::vector<BackendId> fromComputes = {Compute::GpuAcc, Compute::CpuRef};
+    BOOST_TEST(fromComputes[0].Get() == GetComputeDeviceAsCString(Compute::GpuAcc));
+    BOOST_TEST(fromComputes[1].Get() == GetComputeDeviceAsCString(Compute::CpuRef));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/test/BackendRegistryTests.cpp b/src/backends/backendsCommon/test/BackendRegistryTests.cpp
new file mode 100644
index 0000000..4afe273
--- /dev/null
+++ b/src/backends/backendsCommon/test/BackendRegistryTests.cpp
@@ -0,0 +1,103 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/Types.hpp>
+
+#include <backendsCommon/BackendRegistry.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+namespace
+{
+
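+// Test helper that swaps the contents of the global backend registry into local storage on
+// construction and restores it on destruction, so each test case starts from an empty registry.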
+class SwapRegistryStorage : public armnn::BackendRegistry
+{
+public:
+    SwapRegistryStorage() : armnn::BackendRegistry()
+    {
+        Swap(armnn::BackendRegistryInstance(), m_TempStorage);
+    }
+
+    ~SwapRegistryStorage()
+    {
+        Swap(armnn::BackendRegistryInstance(), m_TempStorage);
+    }
+
+private:
+    FactoryStorage m_TempStorage;
+};
+
+}
+
+BOOST_AUTO_TEST_SUITE(BackendRegistryTests)
+
+BOOST_AUTO_TEST_CASE(SwapRegistry)
+{
+    using namespace armnn;
+    auto nFactories = BackendRegistryInstance().Size();
+    {
+        SwapRegistryStorage helper;
+        BOOST_TEST(BackendRegistryInstance().Size() == 0);
+    }
+    BOOST_TEST(BackendRegistryInstance().Size() == nFactories);
+}
+
+BOOST_AUTO_TEST_CASE(TestRegistryHelper)
+{
+    using namespace armnn;
+    SwapRegistryStorage helper;
+
+    bool called = false;
+
+    StaticRegistryInitializer<BackendRegistry> factoryHelper(
+        BackendRegistryInstance(),
+        "HelloWorld",
+        [&called](const EmptyInitializer&)
+        {
+            called = true;
+            return armnn::IBackendInternalUniquePtr(nullptr);
+        }
+    );
+
+    // sanity check: the factory has not been called yet
+    BOOST_TEST(called == false);
+
+    auto factoryFunction = BackendRegistryInstance().GetFactory("HelloWorld");
+
+    // sanity check: the factory has still not been called
+    BOOST_TEST(called == false);
+
+    factoryFunction(EmptyInitializer());
+    BOOST_TEST(called == true);
+}
+
+BOOST_AUTO_TEST_CASE(TestDirectCallToRegistry)
+{
+    using namespace armnn;
+    SwapRegistryStorage helper;
+
+    bool called = false;
+    BackendRegistryInstance().Register(
+        "HelloWorld",
+        [&called](const EmptyInitializer&)
+        {
+            called = true;
+            return armnn::IBackendInternalUniquePtr(nullptr);
+        }
+    );
+
+    // sanity check: the factory has not been called yet
+    BOOST_TEST(called == false);
+
+    auto factoryFunction = BackendRegistryInstance().GetFactory("HelloWorld");
+
+    // sanity check: the factory has still not been called
+    BOOST_TEST(called == false);
+
+    factoryFunction(EmptyInitializer());
+    BOOST_TEST(called == true);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/test/BatchNormTestImpl.hpp b/src/backends/backendsCommon/test/BatchNormTestImpl.hpp
new file mode 100644
index 0000000..2360fd5
--- /dev/null
+++ b/src/backends/backendsCommon/test/BatchNormTestImpl.hpp
@@ -0,0 +1,186 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <backendsCommon/test/QuantizeHelper.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+template<typename T>
+LayerTestResult<T, 4> BatchNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                        const armnn::TensorShape& inputOutputTensorShape,
+                                        const std::vector<float>& inputValues,
+                                        const std::vector<float>& expectedOutputValues,
+                                        float qScale,
+                                        int32_t qOffset,
+                                        armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::GetDataType<T>());
+
+    armnn::DataLayoutIndexed dataLayoutIndexed(dataLayout);
+
+    armnn::TensorInfo tensorInfo({ inputOutputTensorShape[dataLayoutIndexed.GetChannelsIndex()] },
+                                 armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+        tensorInfo.SetQuantizationScale(qScale);
+        tensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,
+                                        QuantizedVector<T>(qScale, qOffset, inputValues));
+
+    // These values are per-channel of the input.
+    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
+    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4,  9}));
+    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3,  2}));
+    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2,  1}));
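+    // Batch normalization computes, per channel: gamma * (x - mean) / sqrt(variance + eps) + beta.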
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+
+    result.outputExpected = MakeTensor<T, 4>(inputTensorInfo,
+                                             QuantizedVector<T>(qScale, qOffset, expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+
+    armnn::BatchNormalizationQueueDescriptor descriptor;
+    descriptor.m_Mean                    = &meanTensor;
+    descriptor.m_Variance                = &varianceTensor;
+    descriptor.m_Beta                    = &betaTensor;
+    descriptor.m_Gamma                   = &gammaTensor;
+    descriptor.m_Parameters.m_Eps        = 0.0f;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+
+    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
+    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
+    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
+    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    return result;
+}
+
+
+template<typename T>
+LayerTestResult<T,4> BatchNormTestNhwcImpl(armnn::IWorkloadFactory& workloadFactory,
+                                           float qScale,
+                                           int32_t qOffset)
+{
+    const unsigned int width    = 2;
+    const unsigned int height   = 3;
+    const unsigned int channels = 2;
+    const unsigned int num      = 1;
+
+    armnn::TensorInfo inputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({num, height, width, channels}, armnn::GetDataType<T>());
+    armnn::TensorInfo tensorInfo({channels}, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+        tensorInfo.SetQuantizationScale(qScale);
+        tensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset,
+        {
+            1.f, 1.f, 4.f, 1.f,
+            4.f, 4.f, 2.f, 1.f,
+            1.f, -2.f, 6.f, 4.f
+        }));
+    // These values are per-channel of the input.
+    auto mean     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, -2}));
+    auto variance = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {4, 9}));
+    auto beta     = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {3, 2}));
+    auto gamma    = MakeTensor<T, 1>(tensorInfo, QuantizedVector<T>(qScale, qOffset, {2, 1}));
+    LayerTestResult<T,4> ret(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::BatchNormalizationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+
+    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
+    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
+    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
+    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Mean             = &meanTensor;
+    data.m_Variance         = &varianceTensor;
+    data.m_Beta             = &betaTensor;
+    data.m_Gamma            = &gammaTensor;
+    data.m_Parameters.m_Eps = 0.0f;
+    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
+
+    // For each channel:
+    // subtract mean, divide by standard deviation (with an epsilon to avoid div by 0),
+    // multiply by gamma and add beta
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset,
+        {
+            1.f, 3.f, 4.f, 3.f,
+            4.f, 4.f, 2.f, 3.f,
+            1.f, 2.f, 6.f, 4.f
+        }));
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
new file mode 100644
index 0000000..ae94ad5
--- /dev/null
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -0,0 +1,41 @@
+#
+# Copyright © 2017 Arm Ltd. All rights reserved.
+# SPDX-License-Identifier: MIT
+#
+
+list(APPEND armnnBackendsCommonUnitTests_sources
+    ActivationFixture.hpp
+    ActivationTestImpl.hpp
+    BackendIdTests.cpp
+    BackendRegistryTests.cpp
+    BatchNormTestImpl.hpp
+    Conv2dTestImpl.hpp
+    ConvertFp16ToFp32TestImpl.hpp
+    ConvertFp32ToFp16TestImpl.hpp
+    EndToEndTestImpl.hpp
+    FullyConnectedTestImpl.hpp
+    IsLayerSupportedTestImpl.hpp
+    JsonPrinterTestImpl.hpp
+    LayerReleaseConstantDataTest.cpp
+    LayerTests.cpp
+    LayerTests.hpp
+    LstmTestImpl.hpp
+    NormTestImpl.hpp
+    OptimizedNetworkTests.cpp
+    PermuteTestImpl.hpp
+    Pooling2dTestImpl.hpp
+    QuantizeHelper.hpp
+    ReshapeTestImpl.hpp
+    RuntimeTestImpl.hpp
+    SoftmaxTestImpl.hpp
+    SplitterTestImpl.hpp
+    TensorCopyUtils.cpp
+    TensorCopyUtils.hpp
+    WorkloadDataValidation.cpp
+    WorkloadTestUtils.hpp
+)
+
+add_library(armnnBackendsCommonUnitTests OBJECT ${armnnBackendsCommonUnitTests_sources})
+target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnn)
+target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/armnnUtils)
+target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/Conv2dTestImpl.hpp b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
new file mode 100755
index 0000000..aa3a44d
--- /dev/null
+++ b/src/backends/backendsCommon/test/Conv2dTestImpl.hpp
@@ -0,0 +1,1240 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <string>
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+#include "QuantizeHelper.hpp"
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include "Permute.hpp"
+#include <boost/numeric/conversion/cast.hpp>
+
+// Mapping from input type to bias type for fully connected layers.
+// float => float, uint8_t => int32_t
+template<typename T>
+struct FullyConnectedBiasTypeForInputType;
+
+template<>
+struct FullyConnectedBiasTypeForInputType<float>
+{
+    using Type = float;
+};
+
+template<>
+struct FullyConnectedBiasTypeForInputType<uint8_t>
+{
+    using Type = int32_t;
+};
+
+// Modifies a std::vector in-place using a specified bias.
+template<typename T, typename B>
+void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
+    const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
+{
+    BOOST_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
+                     "Invalid type and parameter combination.");
+    BOOST_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
+                     "Invalid type and parameter combination.");
+
+    // Note we need to dequantize and re-quantize the image value and the bias.
+    for (uint32_t i = 0; i < bias.size(); ++i)
+    {
+        float dBias = SelectiveDequantize(bias[i], bScale, bOffset);
+        for (uint32_t y = 0; y < h; ++y)
+        {
+            for (uint32_t x = 0; x < w; ++x)
+            {
+                uint32_t offset = (i * h + y) * w + x;
+                BOOST_ASSERT(offset < v.size());
+                T& outRef = v[offset];
+                float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
+                outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
+            }
+        }
+    }
+}
+
+template<typename T, typename B>
+LayerTestResult<T, 4> SimpleConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                  const boost::multi_array<T, 4>& originalInput,
+                                                  const boost::multi_array<T, 4>& originalKernel,
+                                                  const boost::multi_array<B, 1>& bias,
+                                                  const boost::multi_array<T, 4>& originalOutputExpected,
+                                                  float qScale,
+                                                  int32_t qOffset,
+                                                  const armnn::DataLayoutIndexed& layout = armnn::DataLayout::NCHW,
+                                                  uint32_t padLeft = 0,
+                                                  uint32_t padTop = 0,
+                                                  uint32_t padRight = 0,
+                                                  uint32_t padBottom = 0)
+{
+    unsigned int inputHeight   = boost::numeric_cast<unsigned int>(originalInput.shape()[2]);
+    unsigned int inputWidth    = boost::numeric_cast<unsigned int>(originalInput.shape()[3]);
+    unsigned int inputChannels = boost::numeric_cast<unsigned int>(originalInput.shape()[1]);
+    unsigned int inputNum      = boost::numeric_cast<unsigned int>(originalInput.shape()[0]);
+
+    unsigned int outputHeight   = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[2]);
+    unsigned int outputWidth    = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[3]);
+    unsigned int outputChannels = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[1]);
+    unsigned int outputNum      = boost::numeric_cast<unsigned int>(originalOutputExpected.shape()[0]);
+
+    unsigned int kernelHeight = boost::numeric_cast<unsigned int>(originalKernel.shape()[2]);
+    unsigned int kernelWidth = boost::numeric_cast<unsigned int>(originalKernel.shape()[3]);
+    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(originalKernel.shape()[1]);
+    unsigned int kernelDepthMul = boost::numeric_cast<unsigned int>(originalKernel.shape()[0]);
+
+    bool biasEnabled = bias.size() > 0;
+
+    // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
+    BOOST_ASSERT(inputNum == 1);
+    BOOST_ASSERT(outputNum == 1);
+
+    // If a bias is used, its size must equal the number of output channels.
+    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+
+
+    // Note these tensors will use two (identical) batches.
+    armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(2*inputNum, inputChannels, inputHeight, inputWidth, layout);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(
+            2*outputNum, outputChannels, outputHeight, outputWidth, layout);
+    armnn::TensorInfo kernelDesc = GetTensorInfo<T>(kernelDepthMul, kernelChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+        kernelDesc.SetQuantizationScale(qScale);
+        kernelDesc.SetQuantizationOffset(qOffset);
+        biasDesc.SetQuantizationScale(qScale*qScale);
+        biasDesc.SetQuantizationOffset(0);
+    }
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+
+    // Construct input data - two batches of the same input image.
+    std::vector<T> inputImage;
+    inputImage.assign(originalInput.data(), originalInput.data() + 1*inputChannels*inputHeight*inputWidth);
+    std::vector<T> inputData;
+    inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
+    inputData.insert(inputData.end(), inputImage.begin(), inputImage.end());
+
+    // At this point, permute the input data if the layout requires it.
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
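+    // Each entry gives the destination index of the corresponding source dimension,
+    // so NCHW -> NHWC maps N->0, C->3, H->1, W->2.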
+    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<T> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+    }
+
+    auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    std::vector<T> outputImage;
+    outputImage.assign(originalOutputExpected.data(),
+            originalOutputExpected.data() + outputChannels*outputHeight*outputWidth);
+
+    // Apply bias to output image if it is enabled.
+    if(biasEnabled)
+    {
+        std::vector<T> biasV;
+        biasV.assign(bias.data(), bias.data() + outputChannels);
+        ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
+            biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
+            outputWidth, outputHeight);
+    }
+
+    // Construct expected output data - two identical images.
+    std::vector<T> outputData;
+    outputData.insert(outputData.end(), outputImage.begin(), outputImage.end());
+    outputData.insert(outputData.end(), outputImage.begin(), outputImage.end());
+
+    // At this point, permute the expected output if the layout requires it.
+    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<T> tmp(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data());
+        outputData = tmp;
+    }
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
+
+    // Todo: nontrivial padding and strides.
+    uint32_t                    strideX  = 1;
+    uint32_t                    strideY  = 1;
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::Convolution2dQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+    // Permute the kernel if necessary
+    boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
+    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data());
+    }
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+
+    if(biasEnabled)
+    {
+        AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+    }
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
+    data.m_Parameters.m_StrideX = strideX;
+    data.m_Parameters.m_StrideY = strideY;
+    data.m_Parameters.m_PadLeft = padLeft;
+    data.m_Parameters.m_PadRight = padRight;
+    data.m_Parameters.m_PadTop = padTop;
+    data.m_Parameters.m_PadBottom = padBottom;
+    data.m_Parameters.m_BiasEnabled = biasEnabled;
+    data.m_Parameters.m_DataLayout = layout.GetDataLayout();
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+template<typename T, typename B>
+LayerTestResult<T, 4> SimpleConvolution2dNhwcTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                      const boost::multi_array<T, 4>& input,
+                                                      const boost::multi_array<T, 4>& kernel,
+                                                      const boost::multi_array<B, 1>& bias,
+                                                      const boost::multi_array<T, 4>& outputExpected,
+                                                      armnn::DataLayout dataLayout,
+                                                      float qScale,
+                                                      int32_t qOffset,
+                                                      uint32_t padLeft = 1,
+                                                      uint32_t padTop = 1,
+                                                      uint32_t padRight = 1,
+                                                      uint32_t padBottom = 1,
+                                                      uint32_t strideX  = 1,
+                                                      uint32_t strideY  = 1)
+{
+    unsigned int inputNum       = boost::numeric_cast<unsigned int>(input.shape()[0]);
+    unsigned int inputChannels  = boost::numeric_cast<unsigned int>(input.shape()[3]);
+    unsigned int inputHeight    = boost::numeric_cast<unsigned int>(input.shape()[1]);
+    unsigned int inputWidth     = boost::numeric_cast<unsigned int>(input.shape()[2]);
+
+    unsigned int kernelChanMul  = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
+    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
+    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
+    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
+
+    unsigned int outputNum      = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
+    unsigned int outputHeight   = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+    unsigned int outputWidth    = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
+
+    bool biasEnabled = bias.size() > 0;
+
+    // Creates the tensors.
+    armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
+                                       armnn::GetDataType<T>());
+    armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, armnn::GetDataType<T>());
+    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+
+    // Construct the input data.
+    std::vector<T> inputData;
+    inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
+    auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    // Construct the output data, with bias applied, as appropriate.
+    std::vector<T> outputData;
+    outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+
+    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+
+    armnn::Convolution2dQueueDescriptor data;
+
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - can be a source of bugs.
+    data.m_Parameters.m_StrideX = strideX;
+    data.m_Parameters.m_StrideY = strideY;
+    data.m_Parameters.m_PadLeft = padLeft;
+    data.m_Parameters.m_PadRight = padRight;
+    data.m_Parameters.m_PadTop = padTop;
+    data.m_Parameters.m_PadBottom = padBottom;
+    data.m_Parameters.m_BiasEnabled = biasEnabled;
+    data.m_Parameters.m_DataLayout = dataLayout;
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+template<typename T, typename B>
+LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                               const boost::multi_array<T, 4>& input,
+                                                               const boost::multi_array<T, 4>& originalKernel,
+                                                               const boost::multi_array<B, 1>& bias,
+                                                               const boost::multi_array<T, 4>& outputExpected,
+                                                               float qScale,
+                                                               int32_t qOffset,
+                                                               const armnn::DataLayoutIndexed& layout,
+                                                               uint32_t padLeft = 0,
+                                                               uint32_t padTop = 0,
+                                                               uint32_t padRight = 0,
+                                                               uint32_t padBottom = 0,
+                                                               uint32_t strideX = 1,
+                                                               uint32_t strideY = 1)
+{
+    unsigned int inputNum       = boost::numeric_cast<unsigned int>(input.shape()[0]);
+    unsigned int inputChannels  = boost::numeric_cast<unsigned int>(input.shape()[1]);
+    unsigned int inputHeight    = boost::numeric_cast<unsigned int>(input.shape()[2]);
+    unsigned int inputWidth     = boost::numeric_cast<unsigned int>(input.shape()[3]);
+    unsigned int kernelChanMul  = boost::numeric_cast<unsigned int>(originalKernel.shape()[0]);
+    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(originalKernel.shape()[1]);
+    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(originalKernel.shape()[2]);
+    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(originalKernel.shape()[3]);
+    unsigned int outputNum      = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+    unsigned int outputHeight   = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
+    unsigned int outputWidth    = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
+
+    // If a bias is used, its size must equal the number of output channels.
+    bool biasEnabled = bias.size() > 0;
+    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+
+    // Creates the tensors.
+    armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
+    armnn::TensorInfo kernelDesc = GetTensorInfo<T>(kernelChanMul, kernelChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+        kernelDesc.SetQuantizationScale(qScale);
+        kernelDesc.SetQuantizationOffset(qOffset);
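+        // The bias scale follows the usual convention of inputScale * weightScale; both equal qScale here.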
+        biasDesc.SetQuantizationScale(qScale*qScale);
+        biasDesc.SetQuantizationOffset(0);
+    }
+
+    // Construct the input data.
+    std::vector<T> inputData;
+    inputData.assign(input.data(), input.data() + inputChannels*inputHeight*inputWidth);
+
+    // At this point, permute the input data if the target layout requires it.
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
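+    // Each entry gives the destination index of the corresponding source dimension,
+    // so NCHW is rearranged into NHWC: N->0, C->3, H->1, W->2.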
+    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<T> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+    }
+
+    auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    // Construct the output data, with bias applied, as appropriate.
+    std::vector<T> outputData;
+    outputData.assign(outputExpected.data(), outputExpected.data() + outputChannels*outputHeight*outputWidth);
+    if (biasEnabled)
+    {
+        std::vector<T> biasV;
+        biasV.assign(bias.data(), bias.data() + outputChannels);
+        ApplyBias(outputData, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
+            biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
+            outputWidth, outputHeight);
+    }
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+
+    // At this point, permute the expected output if the target layout requires it.
+    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<T> tmp(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp.data());
+        outputData = tmp;
+    }
+
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+
+    // Permute the kernel if necessary
+    boost::multi_array<T, 4> kernel = boost::multi_array<T, 4>(originalKernel);
+    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernel.data(), kernel.data());
+    }
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+
+    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+    if (biasEnabled)
+    {
+        AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+    }
+
+    armnn::DepthwiseConvolution2dQueueDescriptor data;
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
+    data.m_Parameters.m_StrideX = strideX;
+    data.m_Parameters.m_StrideY = strideY;
+    data.m_Parameters.m_PadLeft = padLeft;
+    data.m_Parameters.m_PadRight = padRight;
+    data.m_Parameters.m_PadTop = padTop;
+    data.m_Parameters.m_PadBottom = padBottom;
+    data.m_Parameters.m_BiasEnabled = biasEnabled;
+    data.m_Parameters.m_DataLayout = layout.GetDataLayout();
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+template<typename T, typename B>
+LayerTestResult<T, 4> DepthwiseConvolution2dDepthMul1TestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                              float qScale,
+                                                              int32_t qOffset,
+                                                              bool biasEnabled,
+                                                              const armnn::DataLayoutIndexed& layout)
+{
+    unsigned int inputHeight = 3;
+    unsigned int inputWidth = 3;
+    unsigned int inputChannels = 2;
+    unsigned int inputNum = 1;
+
+    unsigned int kernelHeight = 3;
+    unsigned int kernelWidth = 3;
+    unsigned int kernelChannels = inputChannels;
+
+    unsigned int outputHeight = 1;
+    unsigned int outputWidth = 1;
+    unsigned int outputChannels = kernelChannels;
+    unsigned int outputNum = inputNum;
+
+    armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputNum, inputChannels, inputHeight, inputWidth, layout);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputNum, outputChannels, outputHeight, outputWidth, layout);
+    armnn::TensorInfo kernelDesc = GetTensorInfo<T>(1, outputChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo biasDesc({ outputChannels }, armnn::GetDataType<B>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+        kernelDesc.SetQuantizationScale(qScale);
+        kernelDesc.SetQuantizationOffset(qOffset);
+        biasDesc.SetQuantizationScale(qScale*qScale);
+        biasDesc.SetQuantizationOffset(0);
+    }
+    std::vector<T> inputData = std::vector<T>(
+            QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
+                    1.f, 2.f, 1.f,
+                    2.f, 1.f, 2.f,
+                    1.f, 2.f, 1.f,
+
+                    1.f, 2.f, 1.f,
+                    2.f, 1.f, 2.f,
+                    1.f, 2.f, 1.f,
+            }));
+    // At this point, permute the input data if the target layout requires it.
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<T> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+    }
+    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    std::vector<B> biasV(QuantizedVector<B>(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
+                                            {0, 2}));
+    auto bias = MakeTensor<B, 1>(biasDesc, biasV);
+
+    std::vector<T> kernelData = std::vector<T>(
+            QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), {
+                    1.f, 0.f,  1.f,
+                    0.f, 0.f,  0.f,
+                    -1.f, 0.f, -1.f,
+
+                    1.f, 0.f,  1.f,
+                    0.f, 0.f,  0.f,
+                    -1.f, 0.f, -1.f,
+            }));
+    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<T> tmp(kernelData.size());
+        armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, kernelData.data(), tmp.data());
+        kernelData = tmp;
+    }
+    auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);
+
+    // Manually calculated.
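+    // Each channel convolves the corner-weighted 3x3 kernel with the 3x3 input:
+    // 1*1 + 1*1 + (-1)*1 + (-1)*1 = 0, giving a single zero per channel.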
+    std::vector<T> outputImage(
+        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                           outputTensorInfo.GetQuantizationOffset(),
+                           {0.f, 0.f})
+    );
+
+    // Optionally apply bias to output image.
+    if(biasEnabled)
+    {
+        ApplyBias(outputImage, outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(),
+                  biasV, biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
+                  outputWidth, outputHeight);
+    }
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<T> tmp(outputImage.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputImage.data(), tmp.data());
+        outputImage = tmp;
+    }
+
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::DepthwiseConvolution2dQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
+    data.m_Parameters.m_StrideX = 1;
+    data.m_Parameters.m_StrideY = 1;
+    data.m_Parameters.m_PadLeft = 0;
+    data.m_Parameters.m_PadRight = 0;
+    data.m_Parameters.m_PadTop = 0;
+    data.m_Parameters.m_PadBottom = 0;
+    data.m_Parameters.m_BiasEnabled = biasEnabled;
+    data.m_Parameters.m_DataLayout = layout.GetDataLayout();
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+template<typename T, typename B>
+LayerTestResult<T, 4> DepthwiseConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                     float qScale,
+                                                     int32_t qOffset,
+                                                     bool biasEnabled,
+                                                     const armnn::DataLayoutIndexed& layout)
+{
+    unsigned int depthMultiplier = 2;
+
+    unsigned int inputHeight    = 8;
+    unsigned int inputWidth     = 16;
+    unsigned int inputChannels  = 2;
+    unsigned int inputBatchSize = 1;
+
+    unsigned int kernelHeight = 5;
+    unsigned int kernelWidth  = 3;
+
+    unsigned int outputHeight    = inputHeight - kernelHeight + 1 + 2;
+    unsigned int outputWidth     = (inputWidth - kernelWidth + 1)/2;
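+    // With padTop = padBottom = 1 and strideY = 1: outputHeight = 8 - 5 + 1 + 2 = 6.
+    // With no horizontal padding and strideX = 2:  outputWidth  = (16 - 3 + 1) / 2 = 7.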
+    unsigned int outputChannels  = inputChannels * depthMultiplier;
+    unsigned int outputBatchSize = inputBatchSize;
+
+    armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(
+            inputBatchSize, inputChannels, inputHeight, inputWidth, layout);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(
+            outputBatchSize, outputChannels, outputHeight, outputWidth, layout);
+    armnn::TensorInfo kernelDesc = GetTensorInfo<T>(
+            depthMultiplier, inputChannels, kernelHeight, kernelWidth, layout);
+    armnn::TensorInfo biasDesc({outputChannels}, armnn::GetDataType<B>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+        kernelDesc.SetQuantizationScale(qScale);
+        kernelDesc.SetQuantizationOffset(qOffset);
+        biasDesc.SetQuantizationScale(qScale*qScale);
+        biasDesc.SetQuantizationOffset(0);
+    }
+
+    // NOTE: originalInputData is in NCHW format
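+    // Channel 0 is mostly 0.5 with a single all-zero row; channel 1 is all zeros apart from a vertical stripe of ones in column 2.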
+    std::vector<T> originalInputData = std::vector<T>(
+            QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
+                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+            }));
+    std::vector<T> inputData = originalInputData;
+    // At this point, permute the input data if the target layout requires it.
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, originalInputData.data(), inputData.data());
+    }
+    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    std::vector<B> biasV(QuantizedVector<B>(biasDesc.GetQuantizationScale(), biasDesc.GetQuantizationOffset(),
+        {0, 2, 1, -1}));
+    auto bias = MakeTensor<B, 1>(biasDesc, biasV);
+
+    std::vector<T> originalKernelData = std::vector<T>(
+            QuantizedVector<T>(kernelDesc.GetQuantizationScale(), kernelDesc.GetQuantizationOffset(), {
+                    1, 1, 1,
+                    1, -1, 1,
+                    1, 1, 1,
+                    1, 1, 1,
+                    1, 1, 1,
+
+                    2, 2, 2,
+                    2, 2, 2,
+                    2, 2, 2,
+                    2, 2, 2,
+                    2, 2, 2,
+
+                    0, 0, 0,
+                    0, -1, 0,
+                    0, 0, 0,
+                    0, 0, 0,
+                    0, 0, 0,
+
+                    0, 0, 0,
+                    0, 0, 0,
+                    0, 1, 0,
+                    0, 0, 0,
+                    0, 0, 0
+            }));
+    std::vector<T> kernelData = originalKernelData;
+    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        armnnUtils::Permute(kernelDesc.GetShape(), NCHWToNHWC, originalKernelData.data(), kernelData.data());
+    }
+    auto kernel = MakeTensor<T, 4>(kernelDesc, kernelData);
+
+    // Manually calculated.
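+    // One 6x7 block per output channel (inputChannels * depthMultiplier = 4 channels in total).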
+    std::vector<T> originalOutputImage = std::vector<T>(
+        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
+            3.5f,  3.5f,  3.5f,  3.5f,  3.5f,  3.5f,  3.5f,
+            6.0f,  6.0f,  6.0f,  6.0f,  6.0f,  6.0f,  6.0f,
+            5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,
+            6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,
+            6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,  6.5f,
+            5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,  5.0f,
+
+            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
+            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
+            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
+            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
+            -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f, -0.5f,
+
+            8.0f,  8.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+            10.0f, 10.0f, 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+            10.0f, 10.0f, 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+            10.0f, 10.0f, 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+            10.0f, 10.0f, 0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+            8.0f,  8.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+
+            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,
+            0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f,  0.0f
+        }));
+
+    // Optionally apply bias to output image.
+    if(biasEnabled)
+    {
+        ApplyBias(originalOutputImage,
+                  outputTensorInfo.GetQuantizationScale(),
+                  outputTensorInfo.GetQuantizationOffset(),
+                  biasV,
+                  biasDesc.GetQuantizationScale(),
+                  biasDesc.GetQuantizationOffset(),
+                  outputWidth,
+                  outputHeight);
+    }
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+    std::vector<T> outputImage = originalOutputImage;
+    if (layout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, originalOutputImage.data(), outputImage.data());
+    }
+
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputImage);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::DepthwiseConvolution2dQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled.
+    data.m_Parameters.m_StrideX = 2;
+    data.m_Parameters.m_StrideY = 1;
+    data.m_Parameters.m_PadLeft = 0;
+    data.m_Parameters.m_PadRight = 0;
+    data.m_Parameters.m_PadTop = 1;
+    data.m_Parameters.m_PadBottom = 1;
+    data.m_Parameters.m_BiasEnabled = biasEnabled;
+    data.m_Parameters.m_DataLayout = layout.GetDataLayout();
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+template<typename T, typename B>
+LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                         const boost::multi_array<T, 4>& input,
+                                                         const boost::multi_array<T, 4>& kernel,
+                                                         const boost::multi_array<B, 1>& bias,
+                                                         const boost::multi_array<T, 4>& outputExpected,
+                                                         float qScale,
+                                                         int32_t qOffset,
+                                                         uint32_t padLeft = 0,
+                                                         uint32_t padTop = 0,
+                                                         uint32_t padRight = 0,
+                                                         uint32_t padBottom = 0,
+                                                         uint32_t strideX = 1,
+                                                         uint32_t strideY = 1)
+{
+    unsigned int inputNum       = boost::numeric_cast<unsigned int>(input.shape()[0]);
+    unsigned int inputChannels  = boost::numeric_cast<unsigned int>(input.shape()[3]);
+    unsigned int inputHeight    = boost::numeric_cast<unsigned int>(input.shape()[1]);
+    unsigned int inputWidth     = boost::numeric_cast<unsigned int>(input.shape()[2]);
+
+    unsigned int kernelChanMul  = boost::numeric_cast<unsigned int>(kernel.shape()[0]);
+    unsigned int kernelChannels = boost::numeric_cast<unsigned int>(kernel.shape()[3]);
+    unsigned int kernelHeight   = boost::numeric_cast<unsigned int>(kernel.shape()[1]);
+    unsigned int kernelWidth    = boost::numeric_cast<unsigned int>(kernel.shape()[2]);
+
+    unsigned int outputNum      = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+    unsigned int outputChannels = boost::numeric_cast<unsigned int>(outputExpected.shape()[3]);
+    unsigned int outputHeight   = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+    unsigned int outputWidth    = boost::numeric_cast<unsigned int>(outputExpected.shape()[2]);
+
+    // Creates the tensors.
+    armnn::TensorInfo inputTensorInfo({inputNum, inputHeight, inputWidth, inputChannels}, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({outputNum, outputHeight, outputWidth, outputChannels},
+                                       armnn::GetDataType<T>());
+    armnn::TensorInfo kernelDesc({kernelChanMul, kernelHeight, kernelWidth, kernelChannels}, armnn::GetDataType<T>());
+    armnn::TensorInfo biasDesc({static_cast<unsigned int>(bias.size())}, armnn::GetDataType<B>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+        kernelDesc.SetQuantizationScale(qScale);
+        kernelDesc.SetQuantizationOffset(qOffset);
+        biasDesc.SetQuantizationScale(qScale*qScale);
+        biasDesc.SetQuantizationOffset(0);
+    }
+
+    // Construct the input data.
+    std::vector<T> inputData;
+    inputData.assign(input.data(), input.data() + inputHeight*inputWidth*inputChannels);
+    auto batchedInput = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    // Construct the output data, with bias applied, as appropriate.
+    std::vector<T> outputData;
+    outputData.assign(outputExpected.data(), outputExpected.data() + outputHeight*outputWidth*outputChannels);
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+
+    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+
+    armnn::DepthwiseConvolution2dQueueDescriptor data;
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor; // Still set this whether or not bias is enabled - it can be a source of bugs.
+    data.m_Parameters.m_StrideX = strideX;
+    data.m_Parameters.m_StrideY = strideY;
+    data.m_Parameters.m_PadLeft = padLeft;
+    data.m_Parameters.m_PadRight = padRight;
+    data.m_Parameters.m_PadTop = padTop;
+    data.m_Parameters.m_PadBottom = padBottom;
+    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &batchedInput[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+template<typename T>
+LayerTestResult<T,4> Convolution1dTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                           float qScale,
+                                           int32_t qOffset,
+                                           bool biasEnabled)
+{
+    using B = typename FullyConnectedBiasTypeForInputType<T>::Type;
+
+    // Until we have a dedicated 1D convolution layer, we can fake one using
+    // 2D convolution with the final dimension set to 1.
+    // This is not expected to be noticeably slower, since convolution is implemented
+    // as a matrix multiplication, at which point the extra dimension does not matter.
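+    // Below, the 1D size is treated as the height (width fixed at 1), so the 1D stride
+    // and padding are applied via StrideY/PadTop/PadBottom.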
+
+    unsigned int batchSize      = 1;
+    unsigned int inputChannels  = 2;
+    unsigned int outputChannels = 3;
+    unsigned int inputSize      = 5; // The 1D size (could view as 'width' or 'height').
+    unsigned int kernelSize     = 3;
+    unsigned int padSize        = 2;
+    unsigned int stride         = 1;
+    unsigned int outputSize     = 7; // (inputSize + 2 * padSize - kernelSize + 1) / stride.
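+    // i.e. (5 + 2*2 - 3 + 1) / 1 = 7.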
+
+    armnn::TensorInfo inputInfo({batchSize, inputChannels, inputSize, 1}, armnn::GetDataType<T>());
+    armnn::TensorInfo outputInfo({batchSize, outputChannels, outputSize, 1}, armnn::GetDataType<T>());
+    armnn::TensorInfo kernelInfo({outputChannels, inputChannels, kernelSize, 1}, armnn::GetDataType<T>());
+    armnn::TensorInfo biasInfo({outputChannels}, armnn::GetDataType<B>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputInfo.SetQuantizationScale(qScale);
+        inputInfo.SetQuantizationOffset(qOffset);
+        outputInfo.SetQuantizationScale(qScale);
+        outputInfo.SetQuantizationOffset(qOffset);
+        kernelInfo.SetQuantizationScale(qScale);
+        kernelInfo.SetQuantizationOffset(qOffset);
+        biasInfo.SetQuantizationScale(inputInfo.GetQuantizationScale()*kernelInfo.GetQuantizationScale());
+        biasInfo.SetQuantizationOffset(0);
+    }
+
+    std::vector<T> inputData(
+        QuantizedVector<T>(inputInfo.GetQuantizationScale(), inputInfo.GetQuantizationOffset(), {
+            5.0f, -2.0f, 2.5f, 0.0f, 1.0f,
+            -3.0f, 3.2f, 5.0f, 2.0f, 3.0f,
+        }));
+
+    std::vector<T> kernelData(
+        QuantizedVector<T>(kernelInfo.GetQuantizationScale(), kernelInfo.GetQuantizationOffset(), {
+            1.0f, 0.0f, 0.0f,
+            0.0f, 2.0f, -1.5f,
+
+            0.0f, 0.0f, 0.0f,
+            0.2f, 0.2f, 0.2f,
+
+            0.5f, 0.0f, 0.5f,
+            0.0f, -1.0f, 0.0f
+        }));
+
+    std::vector<B> biasData(
+        QuantizedVector<B>(biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(), {
+            1.0f, 0.0f, 0.0f
+        }));
+
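+    // Three rows of seven values: one row per output channel, with several entries written out as the individual convolution terms.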
+    std::vector<T> outputData(
+        QuantizedVector<T>(outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(), {
+            4.5f, -10.8f, 5.0f + 6.4f - 7.5f, -2.0f + 10.0f -3.0f, 2.5f + 4.0f - 4.5f, 6.0f, 1.0f,
+            -0.6f, -0.6f + 0.64f, -0.6f + 0.64f + 1.0f, 0.64f + 1.0f + 0.4f, 1.0f + 0.4f + 0.6f, 0.4f + 0.6f, 0.6f,
+            2.5f, -1.0f + 3.0f, 1.25f - 3.2f + 2.5f, -1.0f - 5.0f, 1.25f + 0.5f - 2.0f, -3.0f, 0.5f
+        }));
+
+    // Optionally apply bias to output image.
+    if(biasEnabled)
+    {
+        ApplyBias(outputData, outputInfo.GetQuantizationScale(), outputInfo.GetQuantizationOffset(),
+            biasData, biasInfo.GetQuantizationScale(), biasInfo.GetQuantizationOffset(),
+            1, outputSize);
+    }
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputInfo);
+
+    armnn::Convolution2dQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle         weightsTensor(kernelInfo);
+    armnn::ScopedCpuTensorHandle         biasTensor(biasInfo);
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, kernelData.data());
+    AllocateAndCopyDataToITensorHandle(&biasTensor, biasData.data());
+
+    AddInputToWorkload(data, info, inputInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputInfo, outputHandle.get());
+
+    data.m_Weight         = &weightsTensor;
+    data.m_Bias           = &biasTensor;
+    data.m_Parameters.m_StrideX        = 1;
+    data.m_Parameters.m_StrideY        = stride;
+    data.m_Parameters.m_PadLeft        = 0;
+    data.m_Parameters.m_PadRight       = 0;
+    data.m_Parameters.m_PadTop         = padSize;
+    data.m_Parameters.m_PadBottom      = padSize;
+    data.m_Parameters.m_BiasEnabled    = biasEnabled;
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvolution2d(data, info);
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), inputData.data());
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    // Output
+    LayerTestResult<T,4> ret(outputInfo);
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    ret.outputExpected = MakeTensor<T, 4>(outputInfo, outputData);
+    return ret;
+}
+
+template<typename T>
+LayerTestResult<T,4> CompareConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                armnn::IWorkloadFactory& refWorkloadFactory)
+{
+    unsigned int inputHeight   = 8;
+    unsigned int inputWidth    = 16;
+    unsigned int inputChannels = 3;
+    unsigned int inputNum      = 5;
+
+    unsigned int kernelHeight = 3;
+    unsigned int kernelWidth  = 3;
+
+    unsigned int strideX = 2;
+    unsigned int strideY = 3;
+    unsigned int padX    = 1;
+    unsigned int padY    = 1;
+
+    unsigned int outputNum      = inputNum;
+    unsigned int outputChannels = 2;
+    unsigned int outputHeight   = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
+    unsigned int outputWidth    = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;
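+    // e.g. outputHeight = (8 + 2*1 - 3 + 3) / 3 = 3 and outputWidth = (16 + 2*1 - 3 + 2) / 2 = 8.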
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+    armnn::TensorInfo kernelDesc;
+    armnn::TensorInfo biasDesc;
+
+    unsigned int inputShape[]    = {inputNum, inputChannels, inputHeight, inputWidth};
+    unsigned int outputShape[]   = {outputNum, outputChannels, outputHeight, outputWidth};
+    unsigned int kernelShape[]   = {outputChannels, inputChannels, kernelHeight, kernelWidth};
+    unsigned int biasShape[]     = {outputChannels};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+    kernelDesc = armnn::TensorInfo(4, kernelShape, armnn::GetDataType<T>());
+    biasDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType<T>());
+
+    LayerTestResult<T,4> ret(outputTensorInfo);
+
+    auto input  = MakeRandomTensor<T, 4>(inputTensorInfo, 124908);
+    auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234);
+    auto bias   = MakeRandomTensor<T, 1>(biasDesc, 1028);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::Convolution2dQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor;
+    data.m_Parameters.m_StrideX = strideX;
+    data.m_Parameters.m_StrideY = strideY;
+    data.m_Parameters.m_PadLeft = padX;
+    data.m_Parameters.m_PadRight = padX;
+    data.m_Parameters.m_PadTop = padY;
+    data.m_Parameters.m_PadBottom = padY;
+    data.m_Parameters.m_BiasEnabled = true;
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
+
+    armnn::Convolution2dQueueDescriptor refData = data;
+    armnn::WorkloadInfo               refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+
+    std::unique_ptr<armnn::IWorkload> workload  = workloadFactory.CreateConvolution2d(data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateConvolution2d(refData, refInfo);
+
+    outputHandleRef->Allocate();
+    inputHandleRef->Allocate();
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+    refWorkloadFactory.Finalize();
+    workloadRef->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+
+    return ret;
+}
+
+template<typename T>
+LayerTestResult<T, 4> CompareDepthwiseConvolution2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                            armnn::IWorkloadFactory& refWorkloadFactory,
+                                                            const armnn::DataLayoutIndexed& layout)
+{
+    unsigned int inputHeight = 8;
+    unsigned int inputWidth = 16;
+    unsigned int inputChannels = 3;
+    unsigned int inputNum = 5;
+
+    unsigned int kernelHeight = 3;
+    unsigned int kernelWidth = 3;
+    unsigned int channelMultiplier = 1;
+
+    unsigned int strideX = 2;
+    unsigned int strideY = 3;
+    unsigned int padX = 1;
+    unsigned int padY = 1;
+
+    unsigned int outputNum = inputNum;
+    unsigned int outputChannels = inputChannels * channelMultiplier;
+    unsigned int outputHeight = (inputHeight + 2 * padY - kernelHeight + strideY) / strideY;
+    unsigned int outputWidth = (inputWidth + 2 * padX - kernelWidth + strideX) / strideX;
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+    armnn::TensorInfo kernelDesc;
+    armnn::TensorInfo biasDesc;
+
+
+    std::vector<unsigned int> inputShape;
+    std::vector<unsigned int> outputShape;
+    std::vector<unsigned int> kernelShape;
+    std::vector<unsigned int> biasShape = { outputChannels };
+    switch (layout.GetDataLayout())
+    {
+        case armnn::DataLayout::NCHW:
+            inputShape =  { inputNum, inputChannels, inputHeight, inputWidth };
+            outputShape = { outputNum, outputChannels, outputHeight, outputWidth };
+            kernelShape = { channelMultiplier, inputChannels, kernelHeight, kernelWidth };
+            break;
+        case armnn::DataLayout::NHWC:
+            inputShape =  { inputNum, inputHeight, inputWidth, inputChannels };
+            outputShape = { outputNum, outputHeight, outputWidth, outputChannels };
+            kernelShape = { channelMultiplier, kernelHeight, kernelWidth, inputChannels };
+            break;
+        default:
+            throw armnn::InvalidArgumentException("unknown data layout ["
+                                                  + std::to_string(static_cast<int>(layout.GetDataLayout())) + "]");
+    }
+
+    float inputsQScale = armnn::IsQuantizedType<T>() ? 1.0f : 0;
+    float outputQScale = armnn::IsQuantizedType<T>() ? 2.0f : 0;
+    int32_t qOffset = 0;
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape.data(), armnn::GetDataType<T>(), inputsQScale, qOffset);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape.data(), armnn::GetDataType<T>(), outputQScale, qOffset);
+    kernelDesc = armnn::TensorInfo(4, kernelShape.data(), armnn::GetDataType<T>(), inputsQScale, qOffset);
+    biasDesc = armnn::TensorInfo(
+            1, biasShape.data(), armnn::GetBiasDataType(armnn::GetDataType<T>()), inputsQScale, qOffset);
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+
+    auto input = MakeRandomTensor<T, 4>(inputTensorInfo, 124908, 0.0f, 255.0f);
+    auto kernel = MakeRandomTensor<T, 4>(kernelDesc, 891234, 0.0f, 255.0f);
+    auto bias = MakeRandomTensor<typename FullyConnectedBiasTypeForInputType<T>::Type, 1>(
+            biasDesc, 1028, 0.0f, 255.0f);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::DepthwiseConvolution2dQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle weightsTensor(kernelDesc);
+    armnn::ScopedCpuTensorHandle biasTensor(biasDesc);
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &kernel[0][0][0][0]);
+    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor;
+    data.m_Parameters.m_StrideX = strideX;
+    data.m_Parameters.m_StrideY = strideY;
+    data.m_Parameters.m_PadLeft = padX;
+    data.m_Parameters.m_PadRight = padX;
+    data.m_Parameters.m_PadTop = padY;
+    data.m_Parameters.m_PadBottom = padY;
+    data.m_Parameters.m_BiasEnabled = true;
+    data.m_Parameters.m_DataLayout = layout.GetDataLayout();
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
+
+    armnn::DepthwiseConvolution2dQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDepthwiseConvolution2d(data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateDepthwiseConvolution2d(refData, refInfo);
+
+    outputHandleRef->Allocate();
+    inputHandleRef->Allocate();
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+    refWorkloadFactory.Finalize();
+    workloadRef->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+
+    return ret;
+}
diff --git a/src/backends/backendsCommon/test/ConvertFp16ToFp32TestImpl.hpp b/src/backends/backendsCommon/test/ConvertFp16ToFp32TestImpl.hpp
new file mode 100644
index 0000000..a63f0cb
--- /dev/null
+++ b/src/backends/backendsCommon/test/ConvertFp16ToFp32TestImpl.hpp
@@ -0,0 +1,54 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <Half.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+LayerTestResult<float, 4> SimpleConvertFp16ToFp32Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    using namespace half_float::literal;
+
+    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
+    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
+
+    auto input = MakeTensor<armnn::Half, 4>(inputTensorInfo,
+        { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
+          1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h });
+
+    LayerTestResult<float, 4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
+        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+          1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ConvertFp16ToFp32QueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp16ToFp32(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
diff --git a/src/backends/backendsCommon/test/ConvertFp32ToFp16TestImpl.hpp b/src/backends/backendsCommon/test/ConvertFp32ToFp16TestImpl.hpp
new file mode 100644
index 0000000..3513823
--- /dev/null
+++ b/src/backends/backendsCommon/test/ConvertFp32ToFp16TestImpl.hpp
@@ -0,0 +1,55 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <Half.hpp>
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+
+LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    using namespace half_float::literal;
+
+    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo,
+        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+          1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
+
+    LayerTestResult<armnn::Half, 4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<armnn::Half, 4>(outputTensorInfo,
+        { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
+          1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h });
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ConvertFp32ToFp16QueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConvertFp32ToFp16(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
new file mode 100644
index 0000000..e16116e
--- /dev/null
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -0,0 +1,102 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+
+#include <backendsCommon/test/QuantizeHelper.hpp>
+
+#include <vector>
+
+namespace
+{
+
+using namespace armnn;
+
+template<typename T>
+bool ConstantUsageTest(const std::vector<BackendId>& computeDevice,
+                       const TensorInfo& commonTensorInfo,
+                       const std::vector<T>& inputData,
+                       const std::vector<T>& constantData,
+                       const std::vector<T>& expectedOutputData)
+{
+    // Create runtime in which test will run
+    IRuntime::CreationOptions options;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // Builds up the structure of the network.
+    INetworkPtr net(INetwork::Create());
+
+    IConnectableLayer* input = net->AddInputLayer(0);
+    IConnectableLayer* constant = net->AddConstantLayer(ConstTensor(commonTensorInfo, constantData));
+    IConnectableLayer* add = net->AddAdditionLayer();
+    IConnectableLayer* output = net->AddOutputLayer(0);
+
+    input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+    constant->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    // Sets the tensors in the network.
+    input->GetOutputSlot(0).SetTensorInfo(commonTensorInfo);
+    constant->GetOutputSlot(0).SetTensorInfo(commonTensorInfo);
+    add->GetOutputSlot(0).SetTensorInfo(commonTensorInfo);
+
+    // Optimizes the network.
+    IOptimizedNetworkPtr optNet = Optimize(*net, computeDevice, runtime->GetDeviceSpec());
+
+    // Loads it into the runtime.
+    NetworkId netId;
+    runtime->LoadNetwork(netId, std::move(optNet));
+
+    // Creates structures for input & output.
+    std::vector<T> outputData(inputData.size());
+
+    InputTensors inputTensors
+    {
+        {0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
+    };
+    OutputTensors outputTensors
+    {
+        {0, Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+    };
+
+    // Does the inference.
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+    // Checks the results.
+    return outputData == expectedOutputData;
+}
+
+inline bool ConstantUsageFloat32Test(const std::vector<BackendId>& backends)
+{
+    const TensorInfo commonTensorInfo({ 2, 3 }, DataType::Float32);
+
+    return ConstantUsageTest(backends,
+        commonTensorInfo,
+        std::vector<float>{ 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }, // Input.
+        std::vector<float>{ 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }, // Const input.
+        std::vector<float>{ 7.f, 7.f, 7.f, 7.f, 7.f, 7.f }  // Expected output.
+    );
+}
+
+inline bool ConstantUsageUint8Test(const std::vector<BackendId>& backends)
+{
+    TensorInfo commonTensorInfo({ 2, 3 }, DataType::QuantisedAsymm8);
+
+    const float scale = 0.023529f;
+    const int8_t offset = -43;
+
+    commonTensorInfo.SetQuantizationScale(scale);
+    commonTensorInfo.SetQuantizationOffset(offset);
+
+    return ConstantUsageTest(backends,
+        commonTensorInfo,
+        QuantizedVector<uint8_t>(scale, offset, { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f }), // Input.
+        QuantizedVector<uint8_t>(scale, offset, { 6.f, 5.f, 4.f, 3.f, 2.f, 1.f }), // Const input.
+        QuantizedVector<uint8_t>(scale, offset, { 7.f, 7.f, 7.f, 7.f, 7.f, 7.f })  // Expected output.
+    );
+}
+
+} // anonymous namespace
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp
new file mode 100644
index 0000000..125b7e6
--- /dev/null
+++ b/src/backends/backendsCommon/test/FullyConnectedTestImpl.hpp
@@ -0,0 +1,287 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+template<typename T, typename B>
+LayerTestResult<T, 2> SimpleFullyConnectedTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    armnn::TensorInfo inputTensorInfo,
+    armnn::TensorInfo outputTensorInfo,
+    armnn::TensorInfo weightsDesc,
+    armnn::TensorInfo biasesDesc,
+    boost::multi_array<T, 2>& weights,
+    boost::multi_array<B, 1>& bias,
+    boost::multi_array<T, 4>& input,
+    bool biasEnabled,
+    bool transposeWeights)
+{
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::FullyConnectedQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle weightsTensor(weightsDesc);
+    armnn::ScopedCpuTensorHandle biasTensor(biasesDesc);
+
+    AllocateAndCopyDataToITensorHandle(&weightsTensor, &weights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&biasTensor, &bias[0]);
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Weight = &weightsTensor;
+    data.m_Bias = &biasTensor;
+    data.m_Parameters.m_BiasEnabled = biasEnabled;
+    data.m_Parameters.m_TransposeWeightMatrix = transposeWeights;
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFullyConnected(data, info);
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+    return result;
+}
+
+LayerTestResult<float, 2> FullyConnectedFloat32Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled,
+    bool transposeWeights)
+{
+    unsigned int inputWidth = 1;
+    unsigned int inputHeight = 1;
+    unsigned int inputChannels = 5;
+    unsigned int inputNum = 2;
+
+    unsigned int outputChannels = 3;
+    unsigned int outputNum = 2;
+
+    // Define the tensor descriptors.
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+    armnn::TensorInfo weightsDesc;
+    armnn::TensorInfo biasesDesc;
+
+    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
+    unsigned int outputShape[] = { outputNum, outputChannels };
+    unsigned int weightsShape[] = { inputChannels, outputChannels };
+    if (transposeWeights)
+    {
+        std::swap(weightsShape[0], weightsShape[1]);
+    }
+    unsigned int biasShape[] = { outputChannels };
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::DataType::Float32);
+    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::DataType::Float32);
+    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::DataType::Float32);
+
+    LayerTestResult<float, 2> result(outputTensorInfo);
+
+    boost::multi_array<float, 4> input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(
+        {
+            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
+
+            5.0f, 4.0f, 3.0f, 2.0f, 1.0f
+        })
+    );
+
+    boost::multi_array<float, 2> weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
+        {
+            .5f, 2.f, .5f,
+            .5f, 2.f, 1.f,
+            .5f, 2.f, 2.f,
+            .5f, 2.f, 3.f,
+            .5f, 2.f, 4.f
+        }));
+
+    if (transposeWeights)
+    {
+        weights = MakeTensor<float, 2>(weightsDesc, std::vector<float>(
+        {
+            .5f, .5f, .5f, .5f, .5f,
+            2.f, 2.f, 2.f, 2.f, 2.f,
+            .5f, 1.f, 2.f, 3.f, 4.f
+        }));
+    }
+
+
+    std::vector<float> biasValues({0.f, 0.f, 0.f});
+    if (biasEnabled)
+    {
+        biasValues =  std::vector<float>({10.f, 20.f, 30.f});
+    }
+    boost::multi_array<float, 1> bias = MakeTensor<float, 1>(biasesDesc, biasValues);
+
+    result = SimpleFullyConnectedTestImpl<float>(
+        workloadFactory,
+        inputTensorInfo, outputTensorInfo,
+        weightsDesc, biasesDesc,
+        weights, bias, input,
+        biasEnabled, transposeWeights
+    );
+
+    result.outputExpected = MakeTensor<float, 2>(outputTensorInfo, std::vector<float>(
+        {
+            0.5f + 1.0f + 1.5f + 2.0f + 2.5f + biasValues[0],
+            2.0f + 4.0f + 6.0f + 8.0f + 10.f + biasValues[1],
+            0.5f + 2.0f + 6.0f + 12.f + 20.f + biasValues[2],
+
+            2.5f + 2.0f + 1.5f + 1.0f + 0.5f + biasValues[0],
+            10.0f + 8.0f + 6.0f + 4.0f + 2.f + biasValues[1],
+            2.5f + 4.0f + 6.0f + 6.f + 4.f   + biasValues[2]
+        })
+    );
+
+    return result;
+}
+
+LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
+{
+    constexpr static unsigned int inputWidth = 3u;
+    constexpr static unsigned int inputHeight = 2u;
+    constexpr static unsigned int inputChannels = 1u;
+
+    constexpr static unsigned int inputSize = inputWidth * inputHeight * inputChannels;
+
+    constexpr static unsigned int outputChannels = 2u;
+
+    armnn::TensorInfo inputTensorInfo({ 1, inputChannels, inputHeight, inputWidth }, armnn::DataType::QuantisedAsymm8);
+    inputTensorInfo.SetQuantizationScale(0.1f);
+    inputTensorInfo.SetQuantizationOffset(63);
+
+    armnn::TensorInfo outputTensorInfo({ 1, outputChannels }, armnn::DataType::QuantisedAsymm8);
+    outputTensorInfo.SetQuantizationScale(5.f);
+    outputTensorInfo.SetQuantizationOffset(biasEnabled ? -50 : 10);
+
+    armnn::TensorInfo weightsDesc({ outputChannels, inputSize }, armnn::DataType::QuantisedAsymm8);
+    weightsDesc.SetQuantizationScale(0.2f);
+    weightsDesc.SetQuantizationOffset(93);
+
+    armnn::TensorInfo biasesDesc({ outputChannels }, armnn::DataType::Signed32);
+    biasesDesc.SetQuantizationScale(inputTensorInfo.GetQuantizationScale() * weightsDesc.GetQuantizationScale());
+    biasesDesc.SetQuantizationOffset(0);
+
+    LayerTestResult<uint8_t, 2> result(outputTensorInfo);
+
+    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>{51, 124, 28,
+        251, 8, 92});
+
+    auto weights = MakeTensor<uint8_t, 2>(weightsDesc, std::vector<uint8_t>{51, 193, 42, 53, 175, 34,
+        210, 145, 23, 74, 34, 150});
+
+    // The bias values below are quantized with scale = 0.02 (inputScale * weightsScale) and offset = 0.
+    auto bias = MakeTensor<int32_t, 1>(biasesDesc, std::vector<int32_t>{9250, 67500});
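+    // (9250 and 67500 dequantize to 185.0f and 1350.0f respectively.)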
+
+    result = SimpleFullyConnectedTestImpl<uint8_t>(
+        workloadFactory,
+        inputTensorInfo, outputTensorInfo,
+        weightsDesc, biasesDesc,
+        weights, bias, input,
+        biasEnabled, true
+    );
+
+    // Manually calculated.
+    // Note one of these values has been clamped to 0.
+    if (biasEnabled)
+    {
+        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 242});
+    }
+    else
+    {
+        result.outputExpected = MakeTensor<uint8_t, 2>(outputTensorInfo, std::vector<uint8_t>{0, 32});
+    }
+
+    return result;
+}
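For reference, the "manually calculated" expected values above can be reproduced with plain
dequantize / accumulate / requantize arithmetic. The standalone sketch below is illustrative
only (it is not part of the patch) and assumes round-to-nearest requantization with clamping
to the uint8 range:

    // Reproduces the FullyConnectedUint8Test expected outputs by hand.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main()
    {
        const std::vector<uint8_t> input   = { 51, 124, 28, 251, 8, 92 };          // scale 0.1,  offset 63
        const std::vector<uint8_t> weights = { 51, 193, 42, 53, 175, 34,            // scale 0.2,  offset 93
                                               210, 145, 23, 74, 34, 150 };
        const std::vector<int32_t> bias    = { 9250, 67500 };                       // scale 0.02, offset 0

        const bool    biasEnabled  = true;
        const float   outputScale  = 5.0f;
        const int32_t outputOffset = biasEnabled ? -50 : 10;

        for (int oc = 0; oc < 2; ++oc)
        {
            float acc = 0.0f;
            for (int i = 0; i < 6; ++i)
            {
                const float x = (static_cast<int>(input[i]) - 63) * 0.1f;            // dequantize input
                const float w = (static_cast<int>(weights[oc * 6 + i]) - 93) * 0.2f; // dequantize weight
                acc += x * w;
            }
            if (biasEnabled)
            {
                acc += static_cast<float>(bias[oc]) * 0.02f;                          // dequantize bias
            }
            const int32_t q = static_cast<int32_t>(std::round(acc / outputScale)) + outputOffset;
            std::cout << std::min<int32_t>(255, std::max<int32_t>(0, q)) << std::endl; // prints 0, then 242
        }
        return 0;
    }

With biasEnabled = false the same arithmetic yields 0 and 32, matching the test.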
+
+//
+// ArmNN variant of the AndroidNN fully_connected_float_large test.
+//
+// Tests the fully connected layer with large values, optionally transposing weights.
+// Note this is templated for consistency, but the nature of this test makes it unlikely to be useful in Uint8 mode.
+//
+template<typename T>
+LayerTestResult<T, 2> FullyConnectedLargeTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                        bool transposeWeights,
+                                                        float qScale = 0.0f,
+                                                        int32_t qOffset = 0)
+{
+    unsigned int inputWidth = 1;
+    unsigned int inputHeight = 1;
+    unsigned int inputChannels = 5;
+    unsigned int inputNum = 1;
+
+    unsigned int outputChannels = 1;
+    unsigned int outputNum = 1;
+
+    // Define the tensor descriptors.
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+    armnn::TensorInfo weightsDesc;
+    armnn::TensorInfo biasesDesc;
+
+    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
+    unsigned int outputShape[] = { outputNum, outputChannels };
+    unsigned int weightsShape[] = { inputChannels, outputChannels };
+    if (transposeWeights)
+    {
+        std::swap(weightsShape[0], weightsShape[1]);
+    }
+
+    unsigned int biasShape[] = { outputChannels };
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
+    outputTensorInfo = armnn::TensorInfo(2, outputShape, armnn::GetDataType<T>());
+    weightsDesc = armnn::TensorInfo(2, weightsShape, armnn::GetDataType<T>());
+    biasesDesc = armnn::TensorInfo(1, biasShape, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            1.0f, 10.0f, 100.0f, 1000.0f, 10000.0f,
+        })
+    );
+
+    boost::multi_array<T, 2> weights = MakeTensor<T, 2>(weightsDesc,
+        QuantizedVector<T>(qScale, qOffset, {
+            2.0f, 3.0f, 4.0f, 5.0f, 6.0f
+        })
+    );
+
+    std::vector<T> biasValues({900000.f});
+    boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasesDesc, biasValues);
+
+    result = SimpleFullyConnectedTestImpl<T>(
+        workloadFactory,
+        inputTensorInfo, outputTensorInfo,
+        weightsDesc, biasesDesc,
+        weights, bias, input,
+        true, transposeWeights
+    );
+
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            965432.0f,
+        })
+    );
+
+    return result;
+}
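The expected value above is just the float dot product plus the bias (a quick sanity check,
not part of the patch):

    1*2 + 10*3 + 100*4 + 1000*5 + 10000*6 + 900000
      = 2 + 30 + 400 + 5000 + 60000 + 900000
      = 965432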
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
new file mode 100644
index 0000000..2c992bc
--- /dev/null
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -0,0 +1,565 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <Graph.hpp>
+
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <boost/core/ignore_unused.hpp>
+
+namespace
+{
+armnn::Graph dummyGraph;
+
+// Make a dummy TensorInfo object.
+template<armnn::DataType DataType>
+armnn::TensorInfo MakeDummyTensorInfo()
+{
+    return armnn::TensorInfo({2,2,2,2}, DataType);
+}
+
+
+// Make a dummy WorkloadInfo using a dummy TensorInfo.
+template<armnn::DataType DataType>
+armnn::WorkloadInfo MakeDummyWorkloadInfo(unsigned int numInputs, unsigned int numOutputs)
+{
+    armnn::WorkloadInfo info;
+    for (unsigned int i=0; i < numInputs; i++)
+    {
+        info.m_InputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
+    }
+    for (unsigned int o=0; o < numOutputs; o++)
+    {
+        info.m_OutputTensorInfos.push_back(MakeDummyTensorInfo<DataType>());
+    }
+    return info;
+}
+
+// Template class to create a dummy layer (2 parameters).
+template<typename LayerType, typename DescType = typename LayerType::DescriptorType>
+struct DummyLayer
+{
+    DummyLayer()
+    {
+        m_Layer = dummyGraph.AddLayer<LayerType>(DescType(), "");
+    }
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+    LayerType* m_Layer;
+};
+
+// Template class to create a dummy layer (1 parameter).
+template<typename LayerType>
+struct DummyLayer<LayerType, void>
+{
+    DummyLayer()
+    {
+        m_Layer = dummyGraph.AddLayer<LayerType>("");
+    }
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+    LayerType* m_Layer;
+};
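Which of the two templates is selected depends on whether the layer policy supplies a
descriptor type. For example (illustrative only; both layer types are covered by the policy
macros later in this file):

    DummyLayer<armnn::SoftmaxLayer> softmax;            // primary template:
                                                        // AddLayer<SoftmaxLayer>(SoftmaxDescriptor(), "")
    DummyLayer<armnn::AdditionLayer, void> addition;    // <LayerType, void> specialization:
                                                        // AddLayer<AdditionLayer>("")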
+
+template<>
+struct DummyLayer<armnn::BatchNormalizationLayer>
+{
+    DummyLayer()
+    {
+        m_Layer = dummyGraph.AddLayer<armnn::BatchNormalizationLayer>(armnn::BatchNormalizationDescriptor(), "");
+        m_Layer->m_Mean = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_Variance = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_Beta = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_Gamma = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+    }
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+    armnn::BatchNormalizationLayer* m_Layer;
+
+};
+
+template<>
+struct DummyLayer<armnn::ConstantLayer, void>
+{
+    DummyLayer()
+    {
+        m_Layer = dummyGraph.AddLayer<armnn::ConstantLayer>("");
+    }
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+    armnn::ConstantLayer* m_Layer;
+};
+
+template<>
+struct DummyLayer<armnn::InputLayer, armnn::LayerBindingId>
+{
+    DummyLayer()
+    {
+        m_Layer = dummyGraph.AddLayer<armnn::InputLayer>(armnn::LayerBindingId(), "");
+
+    }
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+    armnn::InputLayer* m_Layer;
+};
+
+template<>
+struct DummyLayer<armnn::MergerLayer>
+{
+    DummyLayer()
+    {
+        armnn::OriginsDescriptor desc(2);
+        m_Layer = dummyGraph.AddLayer<armnn::MergerLayer>(desc, "");
+
+    }
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+    armnn::MergerLayer* m_Layer;
+};
+
+template<>
+struct DummyLayer<armnn::OutputLayer, armnn::LayerBindingId>
+{
+    DummyLayer()
+    {
+        m_Layer = dummyGraph.AddLayer<armnn::OutputLayer>(armnn::LayerBindingId(), "");
+
+    }
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+    armnn::OutputLayer* m_Layer;
+};
+
+template<>
+struct DummyLayer<armnn::SplitterLayer>
+{
+    DummyLayer()
+    {
+        armnn::ViewsDescriptor desc(1);
+        m_Layer = dummyGraph.AddLayer<armnn::SplitterLayer>(desc, "");
+
+    }
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+    armnn::SplitterLayer* m_Layer;
+};
+
+template <typename ConvolutionLayerType>
+struct DummyConvolutionLayer
+{
+    DummyConvolutionLayer()
+    {
+        typename ConvolutionLayerType::DescriptorType desc;
+        m_Layer = dummyGraph.AddLayer<ConvolutionLayerType>(desc, "");
+        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_Bias = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+    }
+    ~DummyConvolutionLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+    ConvolutionLayerType* m_Layer;
+};
+
+template<>
+struct DummyLayer<armnn::Convolution2dLayer>
+    : public DummyConvolutionLayer<armnn::Convolution2dLayer>
+{
+};
+
+template<>
+struct DummyLayer<armnn::DepthwiseConvolution2dLayer>
+    : public DummyConvolutionLayer<armnn::DepthwiseConvolution2dLayer>
+{
+};
+
+template <typename LstmLayerType>
+struct DummyLstmLayer
+{
+    DummyLstmLayer()
+    {
+        typename LstmLayerType::DescriptorType desc;
+        desc.m_CifgEnabled = false;
+
+        m_Layer = dummyGraph.AddLayer<LstmLayerType>(desc, ""); // use the descriptor configured above
+        m_Layer->m_BasicParameters.m_InputToForgetWeights     = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_BasicParameters.m_InputToCellWeights       = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_BasicParameters.m_InputToOutputWeights     = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_BasicParameters.m_RecurrentToForgetWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_BasicParameters.m_RecurrentToCellWeights   = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_BasicParameters.m_RecurrentToOutputWeights = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_BasicParameters.m_ForgetGateBias           = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_BasicParameters.m_CellBias                 = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_BasicParameters.m_OutputGateBias           = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+
+        m_Layer->m_CifgParameters.m_InputToInputWeights        = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_CifgParameters.m_RecurrentToInputWeights    = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_CifgParameters.m_CellToInputWeights         = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+        m_Layer->m_CifgParameters.m_InputGateBias              = std::make_unique<armnn::ScopedCpuTensorHandle>(
+                armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+    }
+    ~DummyLstmLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+    armnn::LstmLayer* m_Layer;
+};
+
+template<>
+struct DummyLayer<armnn::LstmLayer>
+        : public DummyLstmLayer<armnn::LstmLayer>
+{
+};
+
+template<>
+struct DummyLayer<armnn::FullyConnectedLayer>
+{
+    DummyLayer()
+    {
+        armnn::FullyConnectedLayer::DescriptorType desc;
+        m_Layer = dummyGraph.AddLayer<armnn::FullyConnectedLayer>(desc, "");
+        m_Layer->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(
+            armnn::TensorInfo(armnn::TensorShape({1,1,1,1}), armnn::DataType::Float32));
+    }
+    ~DummyLayer()
+    {
+        dummyGraph.EraseLayer(m_Layer);
+    }
+    armnn::FullyConnectedLayer* m_Layer;
+};
+
+// Tag for giving each LayerType entry a unique strong type.
+template<armnn::LayerType>
+struct Tag{};
+
+#define DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, descType) \
+template<armnn::DataType DataType> \
+struct LayerTypePolicy<armnn::LayerType::name, DataType> \
+{ \
+    using Type = armnn::name##Layer; \
+    using Desc = descType; \
+    using QueueDesc = armnn::name##QueueDescriptor; \
+    constexpr static const char* NameStr = #name; \
+    \
+    static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory *factory, \
+        unsigned int nIn, unsigned int nOut) \
+    { \
+        QueueDesc desc; \
+        armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut); \
+        return factory->Create##name(desc, info); \
+    } \
+};
+
+// Define a layer policy specialization for use with the IsLayerSupported tests.
+// Use this version for layers whose constructor takes 1 parameter (name).
+#define DECLARE_LAYER_POLICY_1_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, void)
+
+// Define a layer policy specialization for use with the IsLayerSupported tests.
+// Use this version for layers whose constructor takes 2 parameters (descriptor and name).
+#define DECLARE_LAYER_POLICY_2_PARAM(name) DECLARE_LAYER_POLICY_CUSTOM_PARAM(name, armnn::name##Descriptor)
+
+// Layer policy template.
+template<armnn::LayerType Type, armnn::DataType DataType>
+struct LayerTypePolicy;
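For readability, this is roughly what one of the 2-parameter policy declarations below,
DECLARE_LAYER_POLICY_2_PARAM(Activation), expands to (hand-expanded sketch):

    template<armnn::DataType DataType>
    struct LayerTypePolicy<armnn::LayerType::Activation, DataType>
    {
        using Type = armnn::ActivationLayer;
        using Desc = armnn::ActivationDescriptor;
        using QueueDesc = armnn::ActivationQueueDescriptor;
        constexpr static const char* NameStr = "Activation";

        static std::unique_ptr<armnn::IWorkload> MakeDummyWorkload(armnn::IWorkloadFactory* factory,
            unsigned int nIn, unsigned int nOut)
        {
            QueueDesc desc;
            armnn::WorkloadInfo info = MakeDummyWorkloadInfo<DataType>(nIn, nOut);
            return factory->CreateActivation(desc, info);
        }
    };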
+
+// Every entry in the armnn::LayerType enum must be accounted for below.
+DECLARE_LAYER_POLICY_2_PARAM(Activation)
+
+DECLARE_LAYER_POLICY_1_PARAM(Addition)
+
+DECLARE_LAYER_POLICY_2_PARAM(BatchNormalization)
+
+DECLARE_LAYER_POLICY_1_PARAM(Constant)
+
+DECLARE_LAYER_POLICY_1_PARAM(ConvertFp16ToFp32)
+
+DECLARE_LAYER_POLICY_1_PARAM(ConvertFp32ToFp16)
+
+DECLARE_LAYER_POLICY_2_PARAM(Convolution2d)
+
+DECLARE_LAYER_POLICY_1_PARAM(MemCopy)
+
+DECLARE_LAYER_POLICY_2_PARAM(DepthwiseConvolution2d)
+
+DECLARE_LAYER_POLICY_2_PARAM(FakeQuantization)
+
+DECLARE_LAYER_POLICY_1_PARAM(Floor)
+
+DECLARE_LAYER_POLICY_2_PARAM(FullyConnected)
+
+DECLARE_LAYER_POLICY_CUSTOM_PARAM(Input, armnn::LayerBindingId)
+
+DECLARE_LAYER_POLICY_2_PARAM(L2Normalization)
+
+DECLARE_LAYER_POLICY_2_PARAM(Lstm)
+
+DECLARE_LAYER_POLICY_2_PARAM(Mean)
+
+DECLARE_LAYER_POLICY_2_PARAM(Merger)
+
+DECLARE_LAYER_POLICY_1_PARAM(Multiplication)
+
+DECLARE_LAYER_POLICY_2_PARAM(Normalization)
+
+DECLARE_LAYER_POLICY_CUSTOM_PARAM(Output, armnn::LayerBindingId)
+
+DECLARE_LAYER_POLICY_2_PARAM(Pad)
+
+DECLARE_LAYER_POLICY_2_PARAM(Permute)
+
+DECLARE_LAYER_POLICY_2_PARAM(Pooling2d)
+
+DECLARE_LAYER_POLICY_1_PARAM(Division)
+
+DECLARE_LAYER_POLICY_2_PARAM(ResizeBilinear)
+
+DECLARE_LAYER_POLICY_2_PARAM(Reshape)
+
+DECLARE_LAYER_POLICY_2_PARAM(Softmax)
+
+DECLARE_LAYER_POLICY_2_PARAM(SpaceToBatchNd)
+
+DECLARE_LAYER_POLICY_2_PARAM(Splitter)
+
+DECLARE_LAYER_POLICY_1_PARAM(Subtraction)
+
+
+// Generic implementation to get the number of input slots for a given layer type.
+template<armnn::LayerType Type>
+unsigned int GetNumInputs(const armnn::Layer& layer)
+{
+    return layer.GetNumInputSlots();
+}
+
+// Generic implementation to get the number of output slots for a given layer type.
+template<armnn::LayerType Type>
+unsigned int GetNumOutputs(const armnn::Layer& layer)
+{
+    return layer.GetNumOutputSlots();
+}
+
+template<>
+unsigned int GetNumInputs<armnn::LayerType::Merger>(const armnn::Layer& layer)
+{
+    boost::ignore_unused(layer);
+    return 2;
+}
+
+// Tests that the IsLayerSupported() function returns the correct value.
+// We determined the correct value by *trying* to create the relevant workload and seeing if it matches what we expect.
+// Returns true if expectations are met, otherwise returns false.
+template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
+bool IsLayerSupportedTest(FactoryType *factory, Tag<Type>)
+{
+    using LayerPolicy = LayerTypePolicy<Type, DataType>;
+    using LayerType = typename LayerPolicy::Type;
+    using LayerDesc = typename LayerPolicy::Desc;
+    DummyLayer<LayerType, LayerDesc> layer;
+
+    unsigned int numIn = GetNumInputs<Type>(*layer.m_Layer);
+    unsigned int numOut = GetNumOutputs<Type>(*layer.m_Layer);
+
+    // Make another dummy layer just to make IsLayerSupported have valid inputs.
+    DummyLayer<armnn::ConstantLayer, void> previousLayer;
+    // Set output of the previous layer to a dummy tensor.
+    armnn::TensorInfo output = MakeDummyTensorInfo<DataType>();
+    previousLayer.m_Layer->GetOutputSlot(0).SetTensorInfo(output);
+    // Connect the output of the previous layer to each input of the tested layer.
+    for (unsigned int i = 0; i < numIn; i++)
+    {
+        armnn::IOutputSlot& previousLayerOutputSlot = previousLayer.m_Layer->GetOutputSlot(0);
+        armnn::IInputSlot& layerInputSlot = layer.m_Layer->GetInputSlot(i);
+        previousLayerOutputSlot.Connect(layerInputSlot);
+    }
+    // Set outputs of tested layer to a dummy tensor.
+    for (unsigned int i = 0; i < numOut; i++)
+    {
+        layer.m_Layer->GetOutputSlot(i).SetTensorInfo(output);
+    }
+
+    std::string layerName = LayerPolicy::NameStr;
+    std::string reasonIfUnsupported;
+    if (FactoryType::IsLayerSupported(*layer.m_Layer, DataType, reasonIfUnsupported))
+    {
+        std::string errorMsg = " layer expected support but found none.";
+        try
+        {
+            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
+            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
+            return retVal;
+        }
+        catch(const armnn::InvalidArgumentException& e)
+        {
+            boost::ignore_unused(e);
+            // This is ok since we throw InvalidArgumentException when creating the dummy workload.
+            return true;
+        }
+        catch(const std::exception& e)
+        {
+            errorMsg = e.what();
+            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
+            return false;
+        }
+        catch(...)
+        {
+            errorMsg = "Unexpected error while testing support for ";
+            BOOST_TEST_ERROR(errorMsg << layerName);
+            return false;
+        }
+    }
+    else
+    {
+        std::string errorMsg = "layer expected no support (giving reason: " + reasonIfUnsupported + ") but found some.";
+        try
+        {
+            bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
+            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
+            return retVal;
+        }
+        // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
+        // using parameters that make IsLayerSupported() return false should throw an
+        // InvalidArgumentException or UnimplementedException.
+        catch(const armnn::InvalidArgumentException& e)
+        {
+            boost::ignore_unused(e);
+            return true;
+        }
+        catch(const armnn::UnimplementedException& e)
+        {
+            boost::ignore_unused(e);
+            return true;
+        }
+        catch(const std::exception& e)
+        {
+            errorMsg = e.what();
+            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
+            return false;
+        }
+        catch(...)
+        {
+            errorMsg = "Unexpected error while testing support for ";
+            BOOST_TEST_ERROR(errorMsg << layerName);
+            return false;
+        }
+    }
+}
+
+// Helper function to compute the next type in the LayerType enum.
+constexpr armnn::LayerType NextType(armnn::LayerType type)
+{
+    return static_cast<armnn::LayerType>(static_cast<int>(type)+1);
+}
+
+// Termination function for determining the end of the LayerType enumeration.
+template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
+bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<armnn::LayerType::LastLayer>)
+{
+    return IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
+};
+
+// Recursive function to test an entry in the LayerType enum and then iterate to the next entry.
+template<typename FactoryType, armnn::DataType DataType, armnn::LayerType Type>
+bool IsLayerSupportedTestsImpl(FactoryType *factory, Tag<Type>)
+{
+    bool v = IsLayerSupportedTest<FactoryType, DataType, Type>(factory, Tag<Type>());
+
+    return v &&
+    IsLayerSupportedTestsImpl<FactoryType, DataType, NextType(Type)>
+        (factory, Tag<NextType(Type)>());
+};
+
+// Helper function to pass through to the test framework.
+template<typename FactoryType, armnn::DataType DataType>
+bool IsLayerSupportedTests(FactoryType *factory)
+{
+    return IsLayerSupportedTestsImpl<FactoryType, DataType>(factory, Tag<armnn::LayerType::FirstLayer>());
+};
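A backend's unit test typically drives the whole compile-time recursion with a single call
per data type. A minimal usage sketch, assuming the reference backend's workload factory
header is available (hypothetical test-case name):

    BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Reference)
    {
        armnn::RefWorkloadFactory factory;
        // Instantiates IsLayerSupportedTest for every armnn::LayerType entry,
        // from FirstLayer to LastLayer, at DataType::Float32.
        IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory);
    }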
+
+template<armnn::LayerType Type>
+bool TestLayerTypeMatches()
+{
+    using LayerPolicy = LayerTypePolicy<Type, armnn::DataType::Float32>;
+    using LayerType = typename LayerPolicy::Type;
+    using LayerDesc = typename LayerPolicy::Desc;
+    DummyLayer<LayerType, LayerDesc> layer;
+
+    std::stringstream ss;
+    ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
+    bool v = Type == layer.m_Layer->GetType();
+    BOOST_CHECK_MESSAGE(v, ss.str());
+    return v;
+};
+
+template<armnn::LayerType Type>
+bool LayerTypeMatchesTestImpl(Tag<armnn::LayerType::LastLayer>)
+{
+    return TestLayerTypeMatches<Type>();
+};
+
+template<armnn::LayerType Type>
+bool LayerTypeMatchesTestImpl(Tag<Type>)
+{
+    return TestLayerTypeMatches<Type>() &&
+        LayerTypeMatchesTestImpl<NextType(Type)>(Tag<NextType(Type)>());
+};
+
+template<typename FactoryType, typename LayerType, armnn::DataType InputDataType , armnn::DataType OutputDataType>
+bool IsConvertLayerSupportedTests(std::string& reasonIfUnsupported)
+{
+    armnn::Graph graph;
+    LayerType* const layer = graph.AddLayer<LayerType>("LayerName");
+
+    armnn::Layer* const input = graph.AddLayer<armnn::InputLayer>(0, "input");
+    armnn::Layer* const output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+    armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, InputDataType);
+    armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, OutputDataType);
+
+    input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+    input->GetOutputHandler(0).SetTensorInfo(inputTensorInfo);
+    layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+    layer->GetOutputHandler(0).SetTensorInfo(outputTensorInfo);
+
+    bool result = FactoryType::IsLayerSupported(*layer, InputDataType, reasonIfUnsupported);
+
+    return result;
+};
+
+} //namespace
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.hpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.hpp
new file mode 100644
index 0000000..a286b28
--- /dev/null
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.hpp
@@ -0,0 +1,355 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <Profiling.hpp>
+
+#include <armnn/Descriptors.hpp>
+#include <armnn/IRuntime.hpp>
+#include <armnn/INetwork.hpp>
+
+#include <boost/test/unit_test.hpp>
+#include <boost/algorithm/string.hpp>
+#include <boost/lexical_cast.hpp>
+
+#include <sstream>
+#include <stack>
+#include <string>
+#include <vector>
+
+inline bool AreMatchingPair(const char opening, const char closing)
+{
+    return (opening == '{' && closing == '}') || (opening == '[' && closing == ']');
+}
+
+inline bool AreParenthesesMatching(const std::string& exp)
+{
+    std::stack<char> expStack;
+    for (size_t i = 0; i < exp.length(); ++i)
+    {
+        if (exp[i] == '{' || exp[i] == '[')
+        {
+            expStack.push(exp[i]);
+        }
+        else if (exp[i] == '}' || exp[i] == ']')
+        {
+            if (expStack.empty() || !AreMatchingPair(expStack.top(), exp[i]))
+            {
+                return false;
+            }
+            else
+            {
+                expStack.pop();
+            }
+        }
+    }
+    return expStack.empty();
+}
+
+inline std::vector<double> ExtractMeasurements(const std::string& exp)
+{
+    std::vector<double> numbers;
+    bool inArray = false;
+    std::string numberString;
+    for (size_t i = 0; i < exp.size(); ++i)
+    {
+        if (exp[i] == '[')
+        {
+            inArray = true;
+        }
+        else if (exp[i] == ']' && inArray)
+        {
+            try
+            {
+                boost::trim_if(numberString, boost::is_any_of("\t,\n"));
+                numbers.push_back(std::stod(numberString));
+            }
+            catch (std::invalid_argument const& e)
+            {
+                BOOST_FAIL("Could not convert measurements to double: " + numberString);
+            }
+
+            numberString.clear();
+            inArray = false;
+        }
+        else if (exp[i] == ',' && inArray)
+        {
+            try
+            {
+                boost::trim_if(numberString, boost::is_any_of("\t,\n"));
+                numbers.push_back(std::stod(numberString));
+            }
+            catch (std::invalid_argument const& e)
+            {
+                BOOST_FAIL("Could not convert measurements to double: " + numberString);
+            }
+            numberString.clear();
+        }
+        else if (exp[i] != '[' && inArray && exp[i] != ',' && exp[i] != ' ')
+        {
+            numberString += exp[i];
+        }
+    }
+    return numbers;
+}
+
+inline std::vector<std::string> ExtractSections(const std::string& exp)
+{
+    std::vector<std::string> sections;
+
+    std::stack<size_t> s;
+    for (size_t i = 0; i < exp.size(); i++)
+    {
+        if (exp.at(i) == '{')
+        {
+            s.push(i);
+        }
+        else if (exp.at(i) == '}')
+        {
+            size_t from = s.top();
+            s.pop();
+            sections.push_back(exp.substr(from, i - from + 1));
+        }
+    }
+
+    return sections;
+}
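A small illustration of what the three helpers above return for a toy, profiler-style string
(hypothetical input, not real profiler output):

    std::string json = R"({"layer": {"raw": [1.0, 2.0, 3.0], "unit": "us"}})";

    bool balanced = AreParenthesesMatching(json);               // true: {} and [] are balanced
    std::vector<double> raw = ExtractMeasurements(json);        // {1.0, 2.0, 3.0} - numbers inside [...]
    std::vector<std::string> sections = ExtractSections(json);  // two entries: the inner {"raw": ...}
                                                                // object first, then the whole string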
+
+inline std::string SoftmaxProfilerTestSetupHelper(const std::vector<armnn::BackendId>& backends)
+{
+    using namespace armnn;
+
+    BOOST_CHECK(!backends.empty());
+
+    ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
+
+    // Create runtime in which test will run
+    IRuntime::CreationOptions options;
+    options.m_EnableGpuProfiling = backends.front() == armnn::Compute::GpuAcc;
+    IRuntimePtr runtime(IRuntime::Create(options));
+
+    // build up the structure of the network
+    INetworkPtr net(INetwork::Create());
+
+    IConnectableLayer* input = net->AddInputLayer(0, "input");
+    IConnectableLayer* softmax = net->AddSoftmaxLayer(SoftmaxDescriptor(), "softmax");
+    IConnectableLayer* output  = net->AddOutputLayer(0, "output");
+
+    input->GetOutputSlot(0).Connect(softmax->GetInputSlot(0));
+    softmax->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    // set the tensors in the network
+    TensorInfo inputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+    inputTensorInfo.SetQuantizationOffset(100);
+    inputTensorInfo.SetQuantizationScale(10000.0f);
+    input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+
+    TensorInfo outputTensorInfo(TensorShape({1, 5}), DataType::QuantisedAsymm8);
+    outputTensorInfo.SetQuantizationOffset(0);
+    outputTensorInfo.SetQuantizationScale(1.0f / 256.0f);
+    softmax->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    // optimize the network
+    IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
+    if(!optNet)
+    {
+        BOOST_FAIL("Error occurred during Optimization, Optimize() returned nullptr.");
+    }
+    // load it into the runtime
+    NetworkId netId;
+    auto error = runtime->LoadNetwork(netId, std::move(optNet));
+    BOOST_TEST(error == Status::Success);
+
+    // create structures for input & output
+    std::vector<uint8_t> inputData
+        {
+            1, 10, 3, 200, 5
+            // one of the inputs is sufficiently larger than the others to saturate the softmax
+        };
+    std::vector<uint8_t> outputData(5);
+
+    armnn::InputTensors inputTensors
+        {
+            {0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())}
+        };
+    armnn::OutputTensors outputTensors
+        {
+            {0, armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
+        };
+
+    runtime->GetProfiler(netId)->EnableProfiling(true);
+
+    // do the inferences
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
+
+    // retrieve the Profiler.Print() output
+    std::stringstream ss;
+    profilerManager.GetProfiler()->Print(ss);
+
+    return ss.str();
+}
+
+inline void SoftmaxProfilerTestValidationHelper(std::string& result, const std::string& testData)
+{
+    // ensure that measurements were extracted from the profiler output
+    std::vector<double> measurementsVector = ExtractMeasurements(result);
+    BOOST_CHECK(!measurementsVector.empty());
+
+    // check sections contain raw and unit tags
+    // first ensure the parentheses are balanced
+    if (AreParenthesesMatching(result))
+    {
+            // remove parent sections that will not have raw or unit tags
+        std::vector<std::string> sectionVector = ExtractSections(result);
+        for (size_t i = 0; i < sectionVector.size(); ++i)
+        {
+            if (boost::contains(sectionVector[i], "\"ArmNN\":")
+                || boost::contains(sectionVector[i], "\"inference_measurements\":"))
+            {
+                sectionVector.erase(sectionVector.begin() + static_cast<int>(i));
+            }
+        }
+        BOOST_CHECK(!sectionVector.empty());
+
+        BOOST_CHECK(std::all_of(sectionVector.begin(), sectionVector.end(),
+                                [](std::string i) { return boost::contains(i, "\"raw\":"); }));
+
+        BOOST_CHECK(std::all_of(sectionVector.begin(), sectionVector.end(),
+                                [](std::string i) { return boost::contains(i, "\"unit\":"); }));
+    }
+
+    // remove the time measurements as they vary from test to test
+    result.erase(std::remove_if (result.begin(),result.end(),
+                                 [](char c) { return c == '.'; }), result.end());
+    result.erase(std::remove_if (result.begin(), result.end(), &isdigit), result.end());
+    result.erase(std::remove_if (result.begin(),result.end(),
+                                 [](char c) { return c == '\t'; }), result.end());
+
+    BOOST_CHECK(boost::contains(result, "ArmNN"));
+    BOOST_CHECK(boost::contains(result, "inference_measurements"));
+    BOOST_CHECK(boost::contains(result, "layer_measurements"));
+    BOOST_CHECK_EQUAL(result, testData);
+
+    // ensure no spare parentheses are present in the print output
+    BOOST_CHECK(AreParenthesesMatching(result));
+}
+
+inline void SetupSoftmaxProfilerWithSpecifiedBackendsAndValidateJsonPrinterResult(
+        const std::vector<armnn::BackendId>& backends)
+{
+    // setup the test fixture and obtain JSON Printer result
+    std::string result = SoftmaxProfilerTestSetupHelper(backends);
+
+    std::string backend = "Ref";
+    std::string changeLine31 = "\n},\n\"CopyMemGeneric_Execute\": {";
+    std::string changeLine39 = "us\"";
+    std::string changeLine40;
+    std::string changeLine45;
+
+    if (backends[0] == armnn::Compute::GpuAcc)
+    {
+        backend = "Cl";
+        changeLine31 = ",\n\"OpenClKernelTimer/: softmax_layer_max_shift_exp_sum_quantized_serial GWS[,,]\": {";
+        changeLine39 = R"(us"
+},
+"OpenClKernelTimer/: softmax_layer_norm_quantized GWS[,,]": {
+"raw": [
+,
+,
+
+],
+"unit": "us")";
+
+        changeLine40 = R"(
+},
+"CopyMemGeneric_Execute": {
+"raw": [
+,
+,
+
+],
+"unit": "us")";
+        changeLine45 = "}\n";
+    }
+    else if (backends[0] == armnn::Compute::CpuAcc)
+    {
+        backend = "Neon";
+        changeLine31 = ",\n\"NeonKernelTimer/: NEFillBorderKernel\": {";
+        changeLine39 = R"(us"
+},
+"NeonKernelTimer/: NELogitsDMaxKernel": {
+"raw": [
+,
+,
+
+],
+"unit": "us"
+},
+"NeonKernelTimer/: NELogitsDSoftmaxKernel": {
+"raw": [
+,
+,
+
+],
+"unit": "us")";
+        changeLine40 = R"(
+},
+"CopyMemGeneric_Execute": {
+"raw": [
+,
+,
+
+],
+"unit": "us")";
+        changeLine45 = "}\n";
+    }
+
+    std::string testData = R"({
+"ArmNN": {
+"inference_measurements": {
+"raw": [
+,
+,
+
+],
+"unit": "us",
+"layer_measurements": {
+"raw": [
+,
+,
+
+],
+"unit": "us",
+"CopyMemGeneric_Execute": {
+"raw": [
+,
+,
+
+],
+"unit": "us"
+},
+")" + backend + R"(SoftmaxUintWorkload_Execute": {
+"raw": [
+,
+,
+
+],
+"unit": "us")" + changeLine31 + R"(
+"raw": [
+,
+,
+
+],
+"unit": ")" + changeLine39 + R"(
+})" + changeLine40 + R"(
+}
+}
+}
+}
+)" + changeLine45 + R"()";
+
+    // validate the JSON Printer result
+    SoftmaxProfilerTestValidationHelper(result, testData);
+}
diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
new file mode 100644
index 0000000..fc32fdc
--- /dev/null
+++ b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
@@ -0,0 +1,213 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <Graph.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+#include <cl/ClWorkloadFactory.hpp>
+
+#include <boost/cast.hpp>
+#include <boost/test/unit_test.hpp>
+
+#include <utility>
+
+using namespace armnn;
+using namespace std;
+
+// connects two layers
+void Connect(Layer* from, Layer* to, const TensorInfo& tensorInfo, unsigned int fromIndex = 0, unsigned int toIndex = 0)
+{
+    from->GetOutputSlot(fromIndex).Connect(to->GetInputSlot(toIndex));
+    from->GetOutputHandler(fromIndex).SetTensorInfo(tensorInfo);
+}
+
+/////////////////////////////////////////////////////////////////////////////////////////////
+// The following tests are created specifically to test the ReleaseConstantData() method in the Layer class.
+// They build very simple graphs that include the layer to be checked.
+// Weights and biases are checked before and after the method is called.
+/////////////////////////////////////////////////////////////////////////////////////////////
+
+BOOST_AUTO_TEST_SUITE(LayerReleaseConstantDataTest)
+
+BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest)
+{
+    Graph             graph;
+    ClWorkloadFactory factory;
+
+    // create the layer we're testing
+    BatchNormalizationDescriptor layerDesc;
+    layerDesc.m_Eps = 0.05f;
+    BatchNormalizationLayer* const layer = graph.AddLayer<BatchNormalizationLayer>(layerDesc, "layer");
+
+    armnn::TensorInfo weightInfo({3}, armnn::DataType::Float32);
+    layer->m_Mean     = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+    layer->m_Variance = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+    layer->m_Beta     = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+    layer->m_Gamma    = std::make_unique<ScopedCpuTensorHandle>(weightInfo);
+    layer->m_Mean->Allocate();
+    layer->m_Variance->Allocate();
+    layer->m_Beta->Allocate();
+    layer->m_Gamma->Allocate();
+
+    // create extra layers
+    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+    // connect up
+    armnn::TensorInfo tensorInfo({2, 3, 1, 1}, armnn::DataType::Float32);
+    Connect(input, layer, tensorInfo);
+    Connect(layer, output, tensorInfo);
+
+    // check that the constants are not NULL
+    BOOST_CHECK(layer->m_Mean != nullptr);
+    BOOST_CHECK(layer->m_Variance != nullptr);
+    BOOST_CHECK(layer->m_Beta != nullptr);
+    BOOST_CHECK(layer->m_Gamma != nullptr);
+
+    // free up the constants
+    layer->ReleaseConstantData();
+
+    // check that the constants are now NULL
+    BOOST_CHECK(layer->m_Mean == nullptr);
+    BOOST_CHECK(layer->m_Variance == nullptr);
+    BOOST_CHECK(layer->m_Beta == nullptr);
+    BOOST_CHECK(layer->m_Gamma == nullptr);
+
+}
+
+
+ BOOST_AUTO_TEST_CASE(ReleaseConvolution2dLayerConstantDataTest)
+ {
+     Graph             graph;
+     ClWorkloadFactory factory;
+
+     // create the layer we're testing
+     Convolution2dDescriptor layerDesc;
+     layerDesc.m_PadLeft = 3;
+     layerDesc.m_PadRight = 3;
+     layerDesc.m_PadTop = 1;
+     layerDesc.m_PadBottom = 1;
+     layerDesc.m_StrideX = 2;
+     layerDesc.m_StrideY = 4;
+     layerDesc.m_BiasEnabled = true;
+
+     Convolution2dLayer* const layer = graph.AddLayer<Convolution2dLayer>(layerDesc, "layer");
+
+     layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({2, 3, 5, 3},
+                                                                          armnn::DataType::Float32));
+     layer->m_Bias   = std::make_unique<ScopedCpuTensorHandle>
+             (TensorInfo({2}, GetBiasDataType(armnn::DataType::Float32)));
+
+     layer->m_Weight->Allocate();
+     layer->m_Bias->Allocate();
+
+     // create extra layers
+     Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+     // connect up
+     Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
+     Connect(layer, output, TensorInfo({2, 2, 2, 10}, armnn::DataType::Float32));
+
+     // check that the constants are not NULL
+     BOOST_CHECK(layer->m_Weight != nullptr);
+     BOOST_CHECK(layer->m_Bias != nullptr);
+
+     // free up the constants
+     layer->ReleaseConstantData();
+
+     // check that the constants are now NULL
+     BOOST_CHECK(layer->m_Weight == nullptr);
+     BOOST_CHECK(layer->m_Bias == nullptr);
+}
+
+BOOST_AUTO_TEST_CASE(ReleaseDepthwiseConvolution2dLayerConstantDataTest)
+{
+    Graph             graph;
+    ClWorkloadFactory factory;
+
+    // create the layer we're testing
+    DepthwiseConvolution2dDescriptor layerDesc;
+    layerDesc.m_PadLeft         = 3;
+    layerDesc.m_PadRight        = 3;
+    layerDesc.m_PadTop          = 1;
+    layerDesc.m_PadBottom       = 1;
+    layerDesc.m_StrideX         = 2;
+    layerDesc.m_StrideY         = 4;
+    layerDesc.m_BiasEnabled     = true;
+
+    DepthwiseConvolution2dLayer* const layer = graph.AddLayer<DepthwiseConvolution2dLayer>(layerDesc, "layer");
+
+    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({3, 3, 5, 3}, DataType::Float32));
+    layer->m_Bias   = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({9}, DataType::Float32));
+    layer->m_Weight->Allocate();
+    layer->m_Bias->Allocate();
+
+    // create extra layers
+    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+    // connect up
+    Connect(input, layer, TensorInfo({2, 3, 8, 16}, armnn::DataType::Float32));
+    Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
+
+    // check that the constants are not NULL
+    BOOST_CHECK(layer->m_Weight != nullptr);
+    BOOST_CHECK(layer->m_Bias != nullptr);
+
+    // free up the constants
+    layer->ReleaseConstantData();
+
+    // check that the constants are now NULL
+    BOOST_CHECK(layer->m_Weight == nullptr);
+    BOOST_CHECK(layer->m_Bias == nullptr);
+}
+
+BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
+{
+    Graph             graph;
+    ClWorkloadFactory factory;
+
+    // create the layer we're testing
+    FullyConnectedDescriptor layerDesc;
+    layerDesc.m_BiasEnabled = true;
+    layerDesc.m_TransposeWeightMatrix = true;
+
+    FullyConnectedLayer* const layer = graph.AddLayer<FullyConnectedLayer>(layerDesc, "layer");
+
+    float inputsQScale = 1.0f;
+    float outputQScale = 2.0f;
+
+    layer->m_Weight = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7, 20},
+                                                          DataType::QuantisedAsymm8, inputsQScale, 0));
+    layer->m_Bias   = std::make_unique<ScopedCpuTensorHandle>(TensorInfo({7},
+                                                          GetBiasDataType(DataType::QuantisedAsymm8), inputsQScale));
+    layer->m_Weight->Allocate();
+    layer->m_Bias->Allocate();
+
+    // create extra layers
+    Layer* const input = graph.AddLayer<InputLayer>(0, "input");
+    Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
+
+    // connect up
+    Connect(input, layer, TensorInfo({3, 1, 4, 5}, DataType::QuantisedAsymm8, inputsQScale));
+    Connect(layer, output, TensorInfo({3, 7}, DataType::QuantisedAsymm8, outputQScale));
+
+    // check that the constants are not NULL
+    BOOST_CHECK(layer->m_Weight != nullptr);
+    BOOST_CHECK(layer->m_Bias != nullptr);
+
+    // free up the constants
+    layer->ReleaseConstantData();
+
+    // check that the constants are now NULL
+    BOOST_CHECK(layer->m_Weight == nullptr);
+    BOOST_CHECK(layer->m_Bias == nullptr);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
new file mode 100755
index 0000000..12a7063
--- /dev/null
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -0,0 +1,6125 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#include "LayerTests.hpp"
+
+#include "test/TensorHelpers.hpp"
+#include "TensorCopyUtils.hpp"
+#include "Permute.hpp"
+
+#include <boost/test/unit_test.hpp>
+#include <boost/assert.hpp>
+
+#include <armnn/LayerSupport.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <algorithm>
+#include <boost/cast.hpp>
+
+#include "WorkloadTestUtils.hpp"
+#include "Conv2dTestImpl.hpp"
+#include "BatchNormTestImpl.hpp"
+#include "ActivationTestImpl.hpp"
+#include "Pooling2dTestImpl.hpp"
+#include "ReshapeTestImpl.hpp"
+#include "FullyConnectedTestImpl.hpp"
+#include "SplitterTestImpl.hpp"
+#include "SoftmaxTestImpl.hpp"
+#include "NormTestImpl.hpp"
+#include "PermuteTestImpl.hpp"
+#include "LstmTestImpl.hpp"
+#include "ConvertFp16ToFp32TestImpl.hpp"
+#include "ConvertFp32ToFp16TestImpl.hpp"
+
+// 3-channel 16x8 image used as common input data for a number of Conv2d tests.
+static std::vector<float> ConvInput3x8x16({
+    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+    0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+    0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f, 0.5f,
+    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+});
+
+// 2-channel bias used by a number of Conv2d tests.
+static std::vector<float> Bias2({0, 2});
+
+// Helper function that returns either Bias2 or an empty vector depending on whether bias is enabled.
+template<typename T>
+boost::multi_array<T, 1> GetBias2(bool biasEnabled, float qScale, int32_t qOffset)
+{
+    if(biasEnabled)
+    {
+        armnn::TensorInfo biasDesc({static_cast<unsigned int>(Bias2.size())}, armnn::GetDataType<T>());
+        boost::multi_array<T, 1> bias = MakeTensor<T, 1>(biasDesc, QuantizedVector<T>(qScale, qOffset, Bias2));
+        return bias;
+    }
+    else
+    {
+        return boost::multi_array<T, 1>();
+    }
+}
+
+template<typename T>
+LayerTestResult<T, 4> SimpleConvolution2d3x5TestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                       float                    qScale,
+                                                       int32_t                  qOffset,
+                                                       bool                     biasEnabled,
+                                                       const armnn::DataLayoutIndexed& layout)
+{
+    // Use common single-batch 3-channel 16x8 image.
+    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
+
+    // Use a 2-element batch with 3-channel 3x5 kernels.
+    armnn::TensorInfo kernelDesc({2, 3, 5, 3}, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            1, 1, 1,
+            1, -1, 1,
+            1, 1, 1,
+            1, 1, 1,
+            1, 1, 1,
+
+            0, 0, 0,
+            0, 0, 0,
+            0, 0, 0,
+            0, 0, 0,
+            0, 0, 0,
+
+            2, 2, 2,
+            2, 2, 2,
+            2, 2, 2,
+            2, 2, 2,
+            2, 2, 2,
+
+
+            0, 0, 0,
+            0, 0, 0,
+            0, 0, 0,
+            0, 0, 0,
+            0, 0, 0,
+
+            1, 1, 1,
+            1, 1, 1,
+            1, 1, 1,
+            1, 1, 1,
+            1, 1, 1,
+
+            0, 0, 0,
+            0, 0, 0,
+            0, 0, 0,
+            0, 0, 0,
+            0, 0, 0
+        })));
+
+    // Expected output is 1 batch of a 2-channel 14x4 image.
+    armnn::TensorInfo outputDesc({1, 2, 4, 14}, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24,
+            -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25, -25,
+            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
+            -23.5f, -23.5f, -23.5f,
+            -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f, -23.5f,
+            -23.5f, -23.5f, -23.5f,
+
+            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            5, 5, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+        })));
+
+    return SimpleConvolution2dTestImpl<T>(workloadFactory,
+      input,
+      kernel,
+      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
+      expectedOutput,
+      qScale,
+      qOffset,
+      layout);
+}
+
+template<typename T>
+LayerTestResult<T, 4> SimpleConvolution2d3x3TestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                       float                    qScale,
+                                                       int32_t                  qOffset,
+                                                       bool                     biasEnabled,
+                                                       const armnn::DataLayoutIndexed& layout)
+{
+    // Use a 3x3 kernel, which exercises ArmCompute's direct convolution path.
+
+    // Use common single-batch 3-channel 16x8 image.
+    armnn::TensorInfo inputDesc({1, 3, 8, 16}, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, QuantizedVector<T>(qScale, qOffset, ConvInput3x8x16));
+
+    // Use a 2-element batch of 3-channel 3x3 kernels.
+    armnn::TensorInfo kernelDesc({2, 3, 3, 3}, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            1, 1, 1,
+            1, -1, 1,
+            1, 1, 1,
+
+            0, 0, 0,
+            0, 0, 0,
+            0, 0, 0,
+
+            2, 2, 2,
+            2, 2, 2,
+            2, 2, 2,
+
+
+            0, 0, 0,
+            0, 0, 0,
+            0, 0, 0,
+
+            1, 1, 1,
+            1, 1, 1,
+            1, 1, 1,
+
+            0, 0, 0,
+            0, 0, 0,
+            0, 0, 0
+        })));
+
+    // Expected output is 1 batch of a 2-channel 14x6 image.
+    armnn::TensorInfo outputDesc({1, 2, 6, 14}, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
+            -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16, -16,
+            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
+            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
+            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
+            -14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,-14.5f,
+
+            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+        })));
+
+    return SimpleConvolution2dTestImpl<T>(workloadFactory,
+      input,
+      kernel,
+      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
+      expectedOutput,
+      qScale,
+      qOffset,
+      layout);
+}
+
+template<typename T>
+LayerTestResult<T, 4> SimpleConvolution2d3x3NhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                           float                    qScale,
+                                                           int32_t                  qOffset,
+                                                           bool                     biasEnabled,
+                                                           armnn::DataLayout        dataLayout)
+{
+    // Use a single-batch 1-channel 4x3 image (NHWC).
+
+    armnn::TensorInfo inputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc,
+                                                      {
+                                                       1, 5, 2, 3,
+                                                       8, 7, 3, 6,
+                                                       3, 3, 9, 1
+                                                       });
+
+
+    // Use a single 1-channel 3x3 kernel (NHWC).
+    armnn::TensorInfo kernelDesc({1, 3, 3, 1}, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, {
+                                                                    4, 5, 6,
+                                                                    0, 0, 0,
+                                                                    3, 2, 1
+                                                                    });
+
+    // Expected output is 1 batch of a 1-channel 4x3 image.
+    armnn::TensorInfo outputDesc({1, 3, 4, 1}, armnn::GetDataType<T>());
+
+    const std::vector<float> outputData =
+            {
+                    23, 41, 33, 21,
+                    44, 65, 76, 52,
+                    82, 85, 79, 42
+            };
+
+    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, outputData);
+
+    return SimpleConvolution2dNhwcTestImpl<T>(workloadFactory,
+                                              input,
+                                              kernel,
+                                              boost::multi_array<T, 1>(),
+                                              expectedOutput,
+                                              dataLayout,
+                                              qScale,
+                                              qOffset);
+}
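The expected values above can be checked by hand: with stride 1 and zero padding of 1 on each
side the output keeps the input's shape, and each output element is the 3x3 kernel applied to
the corresponding input window. A standalone sketch, assuming that padding configuration (not
part of the patch):

    #include <iostream>

    int main()
    {
        const float in[3][4] = { { 1, 5, 2, 3 },
                                 { 8, 7, 3, 6 },
                                 { 3, 3, 9, 1 } };
        const float k[3][3]  = { { 4, 5, 6 },
                                 { 0, 0, 0 },
                                 { 3, 2, 1 } };

        for (int y = 0; y < 3; ++y)
        {
            for (int x = 0; x < 4; ++x)
            {
                float acc = 0.0f;
                for (int ky = -1; ky <= 1; ++ky)
                {
                    for (int kx = -1; kx <= 1; ++kx)
                    {
                        const int iy = y + ky;  // zero padding: skip out-of-bounds taps
                        const int ix = x + kx;
                        if (iy >= 0 && iy < 3 && ix >= 0 && ix < 4)
                        {
                            acc += in[iy][ix] * k[ky + 1][kx + 1];
                        }
                    }
                }
                std::cout << acc << (x == 3 ? "\n" : " ");
            }
        }
        return 0;  // prints: 23 41 33 21 / 44 65 76 52 / 82 85 79 42
    }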
+
+LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
+                                                     bool                     biasEnabled,
+                                                     const armnn::DataLayoutIndexed& layout)
+{
+    return SimpleConvolution2d3x5TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
+}
+
+LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                            bool                     biasEnabled,
+                                                            const armnn::DataLayoutIndexed& layout)
+{
+    return SimpleConvolution2d3x5TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
+}
+
+LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
+                                                     bool                     biasEnabled,
+                                                     const armnn::DataLayoutIndexed& layout)
+{
+    return SimpleConvolution2d3x3TestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, layout);
+}
+
+LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
+                                                         bool                     biasEnabled)
+{
+    return SimpleConvolution2d3x3NhwcTestCommon<float>(workloadFactory, 0.f, 0, biasEnabled, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                            bool                     biasEnabled,
+                                                            const armnn::DataLayoutIndexed& layout)
+{
+    return SimpleConvolution2d3x3TestCommon<uint8_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
+}
+
+template<typename T>
+LayerTestResult<T, 4> Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon(
+    armnn::IWorkloadFactory& workloadFactory,
+    const armnn::DataLayoutIndexed& layout,
+    float                    qScale,
+    int32_t                  qOffset)
+{
+    // Use a single-batch 1-channel 3x3 image as input.
+    armnn::TensorInfo inputDesc({1, 1, 3, 3}, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            11,21,31,
+            12,22,32,
+            13,23,33
+        })));
+
+    // Use 1 batch of a 1-channel 2x2 kernel.
+    armnn::TensorInfo kernelDesc({1, 1, 2, 2}, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            -11,-21,
+            -12,-22,
+        })));
+
+// Expected output is 1 batch of a 1-channel 8x6 (height x width) image.
+// Manually calculated like this:
+//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
+//[-11*0 -21*0  -12*0 -22*11 ; -11*0  -21*0  -12*11 -22*21 ; -11*0  -21*0  -12*21 -22*31 ; -11*0  -21*0 -12*31 -22*0 ..]
+//[-11*0 -21*11 -12*0 -22*12 ; -11*11 -21*21 -12*12 -22*22 ; -11*21 -21*31 -12*22 -22*32 ; -11*31 -21*0 -12*32 -22*0 ..]
+//[-11*0 -21*12 -12*0 -22*13 ; -11*12 -21*22 -12*13 -22*23 ; -11*22 -21*32 -12*23 -22*33 ; -11*32 -21*0 -12*33 -22*0 ..]
+//[-11*0 -21*13 -12*0 -22*0  ; -11*13 -21*23 -12*0  -22*0  ; -11*23 -21*33 -12*0  -22*0  ; -11*33 -21*0 -12*0  -22*0 ..]
+//[-11*0 -21*0  -12*0 -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0  -12*0  -22*0  ; -11*0  -21*0 -12*0  -22*0 ..]
+//[..... .....  ..... .....  ; .....  .....  .....  .....  ; .....  .....  .....  .....  ; .....  ..... .....  ..... ..]
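+// For reference, with stride 1 the output size is inputSize + padBefore + padAfter - kernelSize + 1:
+// height = 3 + 2 + 4 - 2 + 1 = 8 and width = 3 + 1 + 3 - 2 + 1 = 6, matching the {1, 1, 8, 6} shape below.
+// Because the padding exceeds kernelSize - 1 on the right and bottom, several output rows/columns see
+// only padding and are therefore all zero.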
+    armnn::TensorInfo outputDesc({1, 1, 8, 6}, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+               0,    0,      0,    0,    0,    0,
+            -242,  -594,  -934, -372,    0,    0,
+            -495, -1190, -1850, -725,    0,    0,
+            -538, -1256, -1916, -748,    0,    0,
+            -273, -626,  -946,  -363,    0,    0,
+               0,    0,     0,     0,    0,    0,
+               0,    0,     0,     0,    0,    0,
+               0,    0,     0,     0,    0,    0
+        })));
+
+    return SimpleConvolution2dTestImpl<T>(workloadFactory,
+      input,
+      kernel,
+      GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
+      expectedOutput,
+      qScale,
+      qOffset,
+      layout,
+      1,  // Padding left.
+      2,  // Padding top.
+      3,  // Padding right.
+      4); // Padding bottom.
+}
+
+template<typename T>
+LayerTestResult<T, 4> SimpleConvolution2dAsymmetricPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                                     const armnn::DataLayoutIndexed& layout,
+                                                                     float qScale,
+                                                                     int32_t qOffset)
+{
+    // Use a single-batch 1-channel 5x5 image as input.
+    armnn::TensorInfo inputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> input = MakeTensor<T, 4>(inputDesc, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            11,21,31,41,51,
+            12,22,32,42,52,
+            13,23,33,43,53,
+            14,24,34,44,54,
+            15,25,35,45,55,
+        })));
+
+    // Use 1 batch of a 1-channel 4x4 kernel.
+    armnn::TensorInfo kernelDesc({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> kernel = MakeTensor<T, 4>(kernelDesc, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            -11,-21,-31,-41,
+            -12,-22,-32,-42,
+            -13,-23,-33,-43,
+            -14,-24,-34,-44,
+        })));
+
+    // Expected output is 1 batch of a 1-channel 5x5 image.
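+    // For reference: with stride 1 and padding {left 1, top 1, right 2, bottom 2}, a 4x4 kernel over a
+    // 5x5 input gives 5 + 1 + 2 - 4 + 1 = 5 outputs in each dimension. Spot-check of the first value:
+    // the top-left window sees one row and one column of zero padding plus the 3x3 input corner, so
+    // -22*11 - 32*21 - 42*31 - 23*12 - 33*22 - 43*32 - 24*13 - 34*23 - 44*33 = -7140.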
+    armnn::TensorInfo outputDesc({ 1, 1, 5, 5 }, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputDesc, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            -7140, -10580, -13940,  -9300, -5230,
+            -9590, -14120, -18520, -12290, -6860,
+            -9980, -14560, -18960, -12560, -7000,
+            -7518, -10904, -14144,  -9318, -5152,
+            -5032,  -7256,  -9376,  -6142, -3368,
+        })));
+
+    return SimpleConvolution2dTestImpl<T>(workloadFactory,
+        input,
+        kernel,
+        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(false, qScale, qOffset),
+        expectedOutput,
+        qScale,
+        qOffset,
+        layout,
+        1,  // Padding left.
+        1,  // Padding top.
+        2,  // Padding right.
+        2); // Padding bottom.
+}
+
+template<typename T>
+LayerTestResult<T, 4> DepthwiseConvolution2dAsymmetricTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                                 float qScale,
+                                                                 int32_t qOffset,
+                                                                 bool biasEnabled,
+                                                                 const armnn::DataLayoutIndexed& layout)
+{
+    // Use a single-batch 2-channel 5x5 image as input.
+    armnn::TensorInfo inputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
+    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
+        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
+             0,  1,  2,  3,  4,
+             5,  6,  7,  8,  9,
+            10, 11, 12, 13, 14,
+            15, 16, 17, 18, 19,
+            20, 21, 22, 23, 24,
+
+            25, 26, 27, 28, 29,
+            30, 31, 32, 33, 34,
+            35, 36, 37, 38, 39,
+            40, 41, 42, 43, 44,
+            45, 46, 47, 48, 49
+        })));
+
+    // Use a depth multiplier of 1 on a 2-channel 4x4 kernel.
+    armnn::TensorInfo kernelTensorInfo({ 1, 2, 4, 4 }, armnn::GetDataType<T>());
+    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
+        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
+            32, 31, 30, 29,
+            28, 27, 26, 25,
+            24, 23, 22, 21,
+            20, 19, 18, 17,
+
+            16, 15, 14, 13,
+            12, 11, 10,  9,
+             8,  7,  6,  5,
+             4,  3,  2,  1
+        })));
+
+    // Expected output is 1 batch of a 2-channel 5x5 image.
+    // Calculated using the Python TensorFlow library with strideX=1, strideY=1.
+    armnn::TensorInfo outputTensorInfo({ 1, 2, 5, 5 }, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
+        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
+            1062, 1580, 1850, 1530, 1117,
+            2140, 3108, 3500, 2842, 2042,
+            3580, 5068, 5460, 4342, 3062,
+            3618, 5072, 5390, 4248, 2971,
+            3074, 4282, 4510, 3533, 2457,
+            1550, 2284, 2362, 1955, 1428,
+            2910, 4206, 4342, 3528, 2536,
+            3390, 4886, 5022, 4068, 2916,
+            3566, 5056, 5182, 4133, 2922,
+            3100, 4352, 4452, 3517, 2465
+        })));
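+    // Spot-check (before any bias is applied): the first channel-0 value comes from the top-left window,
+    // which sees one row and one column of zero padding, so
+    // 27*0 + 26*1 + 25*2 + 23*5 + 22*6 + 21*7 + 19*10 + 18*11 + 17*12 = 1062.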
+
+    return DepthwiseConvolution2dAsymmetricTestImpl<T>(workloadFactory,
+        input,
+        kernel,
+        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
+        expectedOutput,
+        qScale,
+        qOffset,
+        layout,
+        1,  // Padding left.
+        1,  // Padding top.
+        2,  // Padding right.
+        2,  // Padding bottom.
+        1,  // strideX
+        1); // strideY
+}
+
+template<typename T>
+LayerTestResult<T, 4> DepthwiseConvolution2dNhwcTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                           float qScale,
+                                                           int32_t qOffset,
+                                                           bool biasEnabled)
+{
+    armnn::TensorInfo inputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
+    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
+        QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(), inputTensorInfo.GetQuantizationOffset(), {
+            0, 25,
+            1, 26,
+            2, 27,
+            3, 28,
+            4, 29,
+
+            5, 30,
+            6, 31,
+            7, 32,
+            8, 33,
+            9, 34,
+
+            10, 35,
+            11, 36,
+            12, 37,
+            13, 38,
+            14, 39,
+
+            15, 40,
+            16, 41,
+            17, 42,
+            18, 43,
+            19, 44,
+
+            20, 45,
+            21, 46,
+            22, 47,
+            23, 48,
+            24, 49
+        })));
+
+    armnn::TensorInfo kernelTensorInfo({ 1, 4, 4, 2}, armnn::GetDataType<T>());
+    auto kernel = MakeTensor<T, 4>(kernelTensorInfo, std::vector<T>(
+        QuantizedVector<T>(kernelTensorInfo.GetQuantizationScale(), kernelTensorInfo.GetQuantizationOffset(), {
+             32, 16,
+             31, 15,
+             30, 14,
+             29, 13,
+
+             28, 12,
+             27, 11,
+             26, 10,
+             25,  9,
+
+             24,  8,
+             23,  7,
+             22,  6,
+             21,  5,
+
+             20,  4,
+             19,  3,
+             18,  2,
+             17,  1
+        })));
+
+    armnn::TensorInfo outputTensorInfo({ 1, 5, 5, 2}, armnn::GetDataType<T>());
+    boost::multi_array<T, 4> expectedOutput = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(
+        QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(), outputTensorInfo.GetQuantizationOffset(), {
+        1062, 1550,
+        1580, 2284,
+        1850, 2362,
+        1530, 1955,
+        1117, 1428,
+
+        2140, 2910,
+        3108, 4206,
+        3500, 4342,
+        2842, 3528,
+        2042, 2536,
+
+        3580, 3390,
+        5068, 4886,
+        5460, 5022,
+        4342, 4068,
+        3062, 2916,
+
+        3618, 3566,
+        5072, 5056,
+        5390, 5182,
+        4248, 4133,
+        2971, 2922,
+
+        3074, 3100,
+        4282, 4352,
+        4510, 4452,
+        3533, 3517,
+        2457, 2465
+        })));
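+    // These are the same per-channel results as in DepthwiseConvolution2dAsymmetricTestCommon above,
+    // re-laid out in NHWC order with the two channel values interleaved for each pixel.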
+
+    return DepthwiseConvolution2dNhwcTestImpl<T>(workloadFactory,
+        input,
+        kernel,
+        GetBias2<typename FullyConnectedBiasTypeForInputType<T>::Type>(biasEnabled, qScale, qOffset),
+        expectedOutput,
+        qScale,
+        qOffset,
+        1,  // Padding left.
+        1,  // Padding top.
+        2,  // Padding right.
+        2,  // Padding bottom.
+        1,  // strideX
+        1);  // strideY
+}
+
+LayerTestResult<float, 4>
+Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory,
+                                                           const armnn::DataLayoutIndexed& layout)
+{
+    return Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTestCommon<float>(workloadFactory, layout, 0.0f, 0);
+}
+
+LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory,
+                                                             const armnn::DataLayoutIndexed& layout)
+{
+    return SimpleConvolution2dAsymmetricPaddingTestCommon<float>(workloadFactory, layout, 0.0f, 0);
+}
+
+LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                     bool                     biasEnabled,
+                                                     const armnn::DataLayoutIndexed& layout)
+{
+    return DepthwiseConvolution2dTestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
+}
+
+LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(armnn::IWorkloadFactory& workloadFactory,
+                                                              bool biasEnabled)
+{
+    return DepthwiseConvolution2dNhwcTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled);
+}
+
+LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
+                                                              bool biasEnabled,
+                                                              const armnn::DataLayoutIndexed& layout)
+{
+    return DepthwiseConvolution2dDepthMul1TestImpl<float, float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
+}
+
+LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
+                                                               bool biasEnabled,
+                                                               const armnn::DataLayoutIndexed& layout)
+{
+    return DepthwiseConvolution2dAsymmetricTestCommon<float>(workloadFactory, 0.0f, 0, biasEnabled, layout);
+}
+
+LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                            bool                     biasEnabled,
+                                                            const armnn::DataLayoutIndexed& layout)
+{
+    return DepthwiseConvolution2dTestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
+}
+
+LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                                     bool biasEnabled,
+                                                                     const armnn::DataLayoutIndexed& layout)
+{
+    return DepthwiseConvolution2dDepthMul1TestImpl<uint8_t, int32_t>(workloadFactory, 0.5f, 50, biasEnabled, layout);
+}
+
+LayerTestResult<float, 4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
+{
+    return Convolution1dTestImpl<float>(workloadFactory, 0.0f, 0, biasEnabled);
+}
+
+LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled)
+{
+    return Convolution1dTestImpl<uint8_t>(workloadFactory, 0.1f, 128, biasEnabled);
+}
+
+LayerTestResult<float,4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                  armnn::IWorkloadFactory& refWorkloadFactory)
+{
+    return CompareConvolution2dTestImpl<float>(workloadFactory, refWorkloadFactory);
+}
+
+template<typename T>
+LayerTestResult<T,4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                       armnn::IWorkloadFactory& refWorkloadFactory,
+                                                       const armnn::DataLayoutIndexed& layout)
+{
+    return CompareDepthwiseConvolution2dTestImpl<T>(workloadFactory, refWorkloadFactory, layout);
+}
+
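+// Explicit instantiations for the element types exercised by the backend tests.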
+template LayerTestResult<float, 4> CompareDepthwiseConvolution2dTest<float>(
+    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&, const armnn::DataLayoutIndexed&);
+template LayerTestResult<uint8_t, 4> CompareDepthwiseConvolution2dTest<uint8_t>(
+    armnn::IWorkloadFactory&, armnn::IWorkloadFactory&, const armnn::DataLayoutIndexed&);
+
+LayerTestResult<float,4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
+    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
+}
+
+LayerTestResult<float,4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+    auto normChannel = armnn::NormalizationAlgorithmChannel::Within;
+    return SimpleNormalizationTestImpl(workloadFactory, normChannel, normMethod);
+}
+
+LayerTestResult<float,4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    auto normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+    auto normChannel = armnn::NormalizationAlgorithmChannel::Across;
+    return SimpleNormalizationNhwcTestImpl(workloadFactory, normChannel, normMethod);
+}
+
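+// Softmax with a beta parameter: the logits are scaled by beta before the exponential,
+// i.e. softmax(x_i) = exp(beta * x_i) / sum_j exp(beta * x_j).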
+LayerTestResult<float,2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta)
+{
+    return SimpleSoftmaxTestImpl<float>(workloadFactory, beta);
+}
+
+LayerTestResult<uint8_t,2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta)
+{
+    return SimpleSoftmaxTestImpl<uint8_t>(workloadFactory, beta);
+}
+
+LayerTestResult<float,4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
+                                                  armnn::IWorkloadFactory& refWorkloadFactory,
+                                                  armnn::NormalizationAlgorithmChannel normChannel,
+                                                  armnn::NormalizationAlgorithmMethod normMethod)
+{
+    return CompareNormalizationTestImpl(workloadFactory, refWorkloadFactory, normChannel, normMethod);
+}
+
+LayerTestResult<float,2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
+    armnn::IWorkloadFactory& refWorkloadFactory,
+    float beta)
+{
+    return CompareSoftmaxTestImpl<float>(workloadFactory, refWorkloadFactory, beta);
+}
+
+LayerTestResult<uint8_t,2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
+    armnn::IWorkloadFactory& refWorkloadFactory,
+    float beta)
+{
+    return CompareSoftmaxTestImpl<uint8_t>(workloadFactory, refWorkloadFactory, beta);
+}
+
+std::vector<LayerTestResult<float,3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return SplitterTestCommon<float>(workloadFactory);
+}
+
+std::vector<LayerTestResult<uint8_t,3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return SplitterTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
+}
+
+LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return CopyViaSplitterTestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return CopyViaSplitterTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
+}
+
+LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
+        armnn::IWorkloadFactory& workloadFactory)
+{
+    armnn::TensorInfo inputDesc({ 2, 2 }, armnn::GetDataType<float>());
+    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
+            { 2., 3., 3., 4. }));
+
+    armnn::TensorInfo outputDesc({ 2, 4 }, armnn::GetDataType<float>());
+    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
+            {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
+             -0.42734814f, -0.00478661f,  0.13455015f, -0.03560682f}));
+    return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
+}
+
+LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
+        armnn::IWorkloadFactory& workloadFactory)
+{
+    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::GetDataType<float>());
+    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
+            {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
+             0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
+
+    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::GetDataType<float>());
+    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
+            {-0.00396806f,  0.029352f,   -0.00279226f,  0.0159977f,
+             -0.00835576f, -0.0211779f,   0.0283512f,  -0.0114597f,
+              0.00907307f, -0.0244004f,  -0.0152191f,  -0.0259063f,
+              0.00914318f,  0.00415118f,  0.017147f,    0.0134203f,
+
+             -0.013869f,    0.0287268f,  -0.00334693f,  0.00733398f,
+             -0.0287926f,  -0.0186926f,   0.0193662f,  -0.0115437f,
+              0.00422612f, -0.0345232f,   0.00223253f, -0.00957321f,
+              0.0210624f,   0.013331f,    0.0150954f,   0.02168f}));
+    return LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(workloadFactory, input, expectedOutput);
+}
+
+LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    armnn::TensorInfo inputDesc({2, 2}, armnn::GetDataType<float>());
+    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
+            {2., 3., 3., 4.}));
+
+    armnn::TensorInfo outputDesc({2, 4}, armnn::GetDataType<float>());
+    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
+            {-0.02973187f, 0.1229473f,  0.20885126f, -0.15358765f,
+             -0.0185422f,  0.11281417f, 0.24466537f, -0.1826292f}));
+
+    return LstmNoCifgNoPeepholeNoProjectionTestImpl(workloadFactory, input, expectedOutput);
+}
+
+LayerTestResult<float,3> MergerTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int outputWidth = 3;
+    unsigned int outputHeight = 6;
+    unsigned int outputChannels = 3;
+
+    unsigned int inputWidth1 = 3;
+    unsigned int inputHeight1 = 6;
+    unsigned int inputChannels1 = 2;
+
+    unsigned int inputWidth2 = 3;
+    unsigned int inputHeight2 = 6;
+    unsigned int inputChannels2 = 1;
+
+    // Define the tensor descriptors.
+    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::Float32);
+    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::Float32);
+    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::Float32);
+
+    LayerTestResult<float,3> ret(outputTensorInfo);
+
+    ret.outputExpected = MakeTensor<float, 3>(outputTensorInfo, std::vector<float>(
+    {
+            1.0f, 2.0f, 3.0f,
+            4.0f, 5.0f, 6.0f,
+            7.0f, 8.0f, 9.0f,
+            10.0f, 11.0f, 12.0f,
+            13.0f, 14.0f, 15.0f,
+            16.0f, 17.0f, 18.0f,
+
+            19.0f, 20.0f, 21.0f,
+            22.0f, 23.0f, 24.0f,
+            25.0f, 26.0f, 27.0f,
+            28.0f, 29.0f, 30.0f,
+            31.0f, 32.0f, 33.0f,
+            34.0f, 35.0f, 36.0f,
+
+            37.0f, 38.0f, 39.0f,
+            40.0f, 41.0f, 42.0f,
+            43.0f, 44.0f, 45.0f,
+            46.0f, 47.0f, 48.0f,
+            49.0f, 50.0f, 51.0f,
+            52.0f, 53.0f, 54.0f,
+        })
+    );
+
+    auto input1 = MakeTensor<float, 3>(inputTensorInfo1, std::vector<float>(
+        {
+            1.0f, 2.0f, 3.0f,
+            4.0f, 5.0f, 6.0f,
+            7.0f, 8.0f, 9.0f,
+            10.0f, 11.0f, 12.0f,
+            13.0f, 14.0f, 15.0f,
+            16.0f, 17.0f, 18.0f,
+
+            19.0f, 20.0f, 21.0f,
+            22.0f, 23.0f, 24.0f,
+            25.0f, 26.0f, 27.0f,
+            28.0f, 29.0f, 30.0f,
+            31.0f, 32.0f, 33.0f,
+            34.0f, 35.0f, 36.0f,
+        })
+    );
+
+    auto input2 = MakeTensor<float, 3>(inputTensorInfo2, std::vector<float>(
+        {
+            37.0f, 38.0f, 39.0f,
+            40.0f, 41.0f, 42.0f,
+            43.0f, 44.0f, 45.0f,
+            46.0f, 47.0f, 48.0f,
+            49.0f, 50.0f, 51.0f,
+            52.0f, 53.0f, 54.0f,
+        })
+    );
+
+    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; // Extent of the window is defined by size of input[0].
+    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
+
+    std::vector<unsigned int> wOrigin2 = {2, 0, 0}; // Extent of the window is defined by size of input[1].
+    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
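+    // The two views are concatenated along the channel dimension: input1 provides channels 0-1 and
+    // input2 provides channel 2, giving the 2 + 1 = 3 channels of the {3, 6, 3} output.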
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    bool subTensorsSupported = workloadFactory.SupportsSubTensors();
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+        subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+            workloadFactory.CreateTensorHandle(inputTensorInfo1);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2  =
+        subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+            workloadFactory.CreateTensorHandle(inputTensorInfo2);
+
+    armnn::MergerQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    data.m_ViewOrigins.push_back(window1);
+    data.m_ViewOrigins.push_back(window2);
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);
+
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+LayerTestResult<float,4> AdditionTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int batchSize = 2;
+    unsigned int channels  = 2;
+    unsigned int height    = 2;
+    unsigned int width     = 3;
+
+    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int shape[] = {batchSize, channels, height, width};
+
+    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+
+
+    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, std::vector<float>(
+        {
+            0.0f, 2.0f, 1.0f,
+            0.2f, 1.0f, 2.0f,
+
+            1.0f, 2.0f, 1.0f,
+            0.2f, 1.0f, 2.0f,
+
+            0.0f, 2.0f, 1.0f,
+            4.2f, 1.0f, 2.0f,
+
+            0.0f, 0.0f, 1.0f,
+            0.2f, 1.0f, 2.0f,
+        }));
+
+    auto input2 = MakeTensor<float, 4>(inputTensorInfo2, std::vector<float>(
+        {
+            1.0f, 2.0f, 1.0f,
+            0.0f, 1.0f, 2.0f,
+
+            1.0f, 2.0f, -2.0f,
+            0.2f, 1.0f, 2.0f,
+
+            0.0f, 2.0f, 1.0f,
+            4.2f, 0.0f, -3.0f,
+
+            0.0f, 0.0f, 1.0f,
+            0.7f, 1.0f, 5.0f,
+        }));
+
+    LayerTestResult<float,4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>(
+        {
+            1.0f, 4.0f, 2.0f,
+            0.2f, 2.0f, 4.0f,
+
+            2.0f, 4.0f, -1.0f,
+            0.4f, 2.0f, 4.0f,
+
+            0.0f, 4.0f, 2.0f,
+            8.4f, 1.0f, -1.0f,
+
+            0.0f, 0.0f, 2.0f,
+            0.9f, 2.0f, 7.0f,
+        }));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::AdditionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+template <typename T>
+LayerTestResult<T, 4> AdditionBroadcastTestImpl(armnn::IWorkloadFactory& workloadFactory,
+    float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 1}, armnn::GetDataType<T>());
+    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 2, 3}, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo1.SetQuantizationScale(qScale);
+        inputTensorInfo1.SetQuantizationOffset(qOffset);
+        inputTensorInfo2.SetQuantizationScale(qScale);
+        inputTensorInfo2.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
+        {
+            0.0f,
+            1.0f,
+
+            2.0f,
+            3.0f,
+
+            4.0f,
+            5.0f,
+        }));
+
+    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
+        {
+            0.5f, 1.5f, 2.5f,
+            3.5f, 4.5f, 5.5f,
+        }));
+
+    LayerTestResult<T,4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
+        {
+            0.5f, 1.5f, 2.5f,
+            4.5f, 5.5f, 6.5f,
+
+            2.5f, 3.5f, 4.5f,
+            6.5f, 7.5f, 8.5f,
+
+            4.5f, 5.5f, 6.5f,
+            8.5f, 9.5f, 10.5f,
+        }));
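+    // Broadcasting example: input1 has shape {1, 3, 2, 1} and input2 has shape {1, 1, 2, 3}, so
+    // output[n][c][h][w] = input1[n][c][h][0] + input2[n][0][h][w], e.g. 6.5 = 3.0 + 3.5.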
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::AdditionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+template <typename T>
+LayerTestResult<T, 4> AdditionBroadcast1ElementTestImpl(armnn::IWorkloadFactory& workloadFactory,
+    float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
+    armnn::TensorInfo inputTensorInfo2 = armnn::TensorInfo({1, 1, 1, 1}, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo = armnn::TensorInfo({1, 3, 2, 3}, armnn::GetDataType<T>());
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo1.SetQuantizationScale(qScale);
+        inputTensorInfo1.SetQuantizationOffset(qOffset);
+        inputTensorInfo2.SetQuantizationScale(qScale);
+        inputTensorInfo2.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, QuantizedVector<T>(qScale, qOffset,
+        {
+             0.0f,  1.0f,  2.0f,
+             3.0f,  4.0f,  5.0f,
+             6.0f,  7.0f,  8.0f,
+             9.0f, 10.0f, 11.0f,
+            12.0f, 13.0f, 14.0f,
+            15.0f, 16.0f, 17.0f,
+        }));
+
+    auto input2 = MakeTensor<T, 4>(inputTensorInfo2, QuantizedVector<T>(qScale, qOffset,
+        {
+            0.5f,
+        }));
+
+    LayerTestResult<T,4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset,
+        {
+             0.5f,  1.5f,  2.5f,
+             3.5f,  4.5f,  5.5f,
+             6.5f,  7.5f,  8.5f,
+             9.5f, 10.5f, 11.5f,
+            12.5f, 13.5f, 14.5f,
+            15.5f, 16.5f, 17.5f,
+        }));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::AdditionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return AdditionBroadcastTestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return AdditionBroadcastTestImpl<uint8_t>(workloadFactory, 2.f, 0);
+}
+
+LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return AdditionBroadcast1ElementTestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return AdditionBroadcast1ElementTestImpl<uint8_t>(workloadFactory, 0.1333333f, 128);
+}
+
+LayerTestResult<float,4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
+                                             armnn::IWorkloadFactory& refWorkloadFactory)
+{
+    unsigned int batchSize = 4;
+    unsigned int channels  = 1;
+    unsigned int height    = 2;
+    unsigned int width     = 3;
+
+    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int shape[] = {batchSize, channels, height, width};
+
+    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+
+    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 1232);
+    auto input2 = MakeRandomTensor<float, 4>(inputTensorInfo2, 456);
+
+    LayerTestResult<float,4> ret(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::AdditionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    armnn::AdditionQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo1, inputHandle1Ref.get());
+    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo2, inputHandle2Ref.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateAddition(refData, refInfo);
+
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
+    inputHandle1Ref->Allocate();
+    inputHandle2Ref->Allocate();
+    outputHandleRef->Allocate();
+
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle2Ref.get(), &input2[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+    refWorkloadFactory.Finalize();
+    workloadRef->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
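+    // ret.output holds the result from the backend under test and ret.outputExpected the result from
+    // the reference backend; the caller compares the two.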
+
+    return ret;
+}
+
+namespace {
+template <typename T>
+LayerTestResult<T, 4> DivisionTestHelper(armnn::IWorkloadFactory& workloadFactory,
+                                         const unsigned int shape0[4],
+                                         const std::vector<T>& values0,
+                                         float scale0,
+                                         int32_t offset0,
+                                         const unsigned int shape1[4],
+                                         const std::vector<T> & values1,
+                                         float scale1,
+                                         int32_t offset1,
+                                         const unsigned int outShape[4],
+                                         const std::vector<T> & outValues,
+                                         float outScale,
+                                         int32_t outOffset)
+{
+    auto dataType = (std::is_same<T, uint8_t>::value ?
+                     armnn::DataType::QuantisedAsymm8 :
+                     armnn::DataType::Float32);
+
+    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
+    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
+    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
+
+    inputTensorInfo0.SetQuantizationScale(scale0);
+    inputTensorInfo0.SetQuantizationOffset(offset0);
+
+    inputTensorInfo1.SetQuantizationScale(scale1);
+    inputTensorInfo1.SetQuantizationOffset(offset1);
+
+    outputTensorInfo.SetQuantizationScale(outScale);
+    outputTensorInfo.SetQuantizationOffset(outOffset);
+
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::DivisionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
+    AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateDivision(data, info);
+
+    inputHandle0->Allocate();
+    inputHandle1->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    return result;
+}
+} // anonymous namespace
+
+LayerTestResult<float,4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int width = 2;
+    const unsigned int height = 2;
+    const unsigned int channelCount = 2;
+    const unsigned int batchSize = 2;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<float> input0({
+                                1.f,  1.f,  1.f,  1.f,  0.f, 0.f, 0.f, 0.f,
+                               -1.f, -1.f, -1.f, -1.f,  5.f, 5.f, 5.f, 5.f });
+
+    std::vector<float> input1({
+                               0.f, 0.f, -0.f, -0.f,  0.f, 0.f, -0.f, -0.f,
+                               0.f, 0.f, -0.f, -0.f,  5.f, 5.f,  5.f,  5.f });
+
+    std::vector<float> output({
+                               INFINITY, INFINITY, -INFINITY, -INFINITY,  NAN, NAN, -NAN, -NAN,
+                               -INFINITY, -INFINITY, INFINITY, INFINITY,  1, 1, 1, 1 });
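+    // Expected results follow IEEE-754 float semantics: (+/-1) / (+/-0) gives a signed infinity,
+    // 0 / 0 gives NaN, and 5 / 5 gives 1.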
+
+    return DivisionTestHelper<float>(workloadFactory,
+                                     shape, input0, 1.0f, 0,
+                                     shape, input1, 1.0f, 0,
+                                     shape, output, 1.0f, 0);
+}
+
+LayerTestResult<float,4> DivisionTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int width = 2;
+    const unsigned int height = 2;
+    const unsigned int channelCount = 2;
+    const unsigned int batchSize = 2;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<float> input0({
+                                      2,  2,  2,  2,    3,  3,  3,  3,
+                                      4,  4,  4,  4,    5,  5,  5,  5 });
+
+    std::vector<float> input1({
+                                      1,  1,  1,  1,    2,  2,  2,  2,
+                                      4,  4,  4,  4,    4,  4,  4,  4 });
+
+    std::vector<float> output({
+                                      2,  2,  2,  2,    1.5,  1.5,  1.5,  1.5,
+                                      1, 1, 1, 1,  1.25, 1.25, 1.25, 1.25 });
+
+
+    return DivisionTestHelper<float>(workloadFactory,
+                                     shape, input0, 1.0f, 0,
+                                     shape, input1, 1.0f, 0,
+                                     shape, output, 1.0f, 0);
+}
+
+LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    std::vector<float> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
+
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+    std::vector<float> input1({ 2 });
+
+    std::vector<float> output({ 1, 2, 3, 4, 5, 6, 7, 8});
+
+
+    return DivisionTestHelper<float>(workloadFactory,
+                                     shape0, input0, 1.0f, 0,
+                                     shape1, input1, 1.0f, 0,
+                                     shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int shape0[] = { 1, 3, 3, 2 };
+    std::vector<float> input0({
+                                      1,   4,       3,  8,      5, 12,
+                                      7,   16,      9, 20,     11, 24,
+                                      13,  28,     15, 32,     17, 36});
+
+    unsigned int shape1[] = { 1, 1, 1, 2 };
+    std::vector<float> input1({ 1, 2 });
+
+    std::vector<float> output({
+                                      1,   2,      3,  4,      5,  6,
+                                      7,   8,      9, 10,     11, 12,
+                                      13, 14,     15, 16,     17, 18});
+
+    return DivisionTestHelper<float>(workloadFactory,
+                                     shape0, input0, 1.0f, 0,
+                                     shape1, input1, 1.0f, 0,
+                                     shape0, output, 1.0f, 0);
+}
+
+
+LayerTestResult<uint8_t,4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int width = 2;
+    const unsigned int height = 2;
+    const unsigned int channelCount = 2;
+    const unsigned int batchSize = 2;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<uint8_t> input0({2,  2,  2,  2,    3,  3,  3,  3,
+                                 4,  4,  4,  4,    5,  5,  5,  5 });
+
+    std::vector<uint8_t> input1({1,  1,  1,  1,    2,  2,  2,  2,
+                                 4,  4,  4,  4,    4,  4,  4,  4 });
+
+    std::vector<uint8_t> output({8,  8,  8,  8,    6,  6,  6,  6,
+                                 4,  4,  4,  4,    5,  5,  5,  5});
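+    // With an output quantization scale of 0.25 (offset 0) the stored values dequantize to the real
+    // quotients: 8 * 0.25 = 2.0 (= 2/1), 6 * 0.25 = 1.5 (= 3/2), 4 * 0.25 = 1.0 (= 4/4), 5 * 0.25 = 1.25 (= 5/4).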
+
+
+    return DivisionTestHelper<uint8_t>(workloadFactory,
+                                     shape, input0, 1.0f,  0,
+                                     shape, input1, 1.0f,  0,
+                                     shape, output, 0.25f, 0);
+}
+
+LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    std::vector<uint8_t> input0({ 2, 4, 6, 8, 10, 12, 14, 16});
+
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+    std::vector<uint8_t> input1({ 2 });
+
+    std::vector<uint8_t> output({ 1, 2, 3, 4, 5, 6, 7, 8});
+
+    return DivisionTestHelper<uint8_t>(workloadFactory,
+                                     shape0, input0, 1.0f, 0,
+                                     shape1, input1, 1.0f, 0,
+                                     shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int shape0[] = { 1, 3, 3, 2 };
+    std::vector<uint8_t> input0({1,   4,     3,  8,      5,  12,
+                                 7,   16,    9,  20,     11, 24,
+                                 13,  28,    15, 32,     17, 36});
+
+    unsigned int shape1[] = { 1, 1, 1, 2 };
+    std::vector<uint8_t> input1({ 1, 2 });
+
+    std::vector<uint8_t> output({1,   2,      3,  4,      5,  6,
+                                 7,   8,      9, 10,     11, 12,
+                                 13, 14,     15, 16,     17, 18});
+
+    return DivisionTestHelper<uint8_t>(workloadFactory,
+                                     shape0, input0, 1.0f, 0,
+                                     shape1, input1, 1.0f, 0,
+                                     shape0, output, 1.0f, 0);
+}
+
+namespace {
+LayerTestResult<float,4> MultiplicationTestHelper(armnn::IWorkloadFactory& workloadFactory,
+                                                  const unsigned int shape0[4],
+                                                  const std::vector<float> & values0,
+                                                  const unsigned int shape1[4],
+                                                  const std::vector<float> & values1,
+                                                  const unsigned int outShape[4],
+                                                  const std::vector<float> & outValues)
+{
+    const size_t dimensionCount = 4;
+    armnn::TensorInfo inputTensorInfo0{dimensionCount, shape0, armnn::DataType::Float32};
+    armnn::TensorInfo inputTensorInfo1{dimensionCount, shape1, armnn::DataType::Float32};
+    armnn::TensorInfo outputTensorInfo{dimensionCount, outShape, armnn::DataType::Float32};
+
+    auto input0 = MakeTensor<float, 4>(inputTensorInfo0, values0);
+    auto input1 = MakeTensor<float, 4>(inputTensorInfo1, values1);
+
+    LayerTestResult<float,4> ret(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::MultiplicationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
+
+    inputHandle0->Allocate();
+    inputHandle1->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outValues);
+    return ret;
+}
+} // anonymous namespace
+
+
+LayerTestResult<float,4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int width = 2;
+    const unsigned int height = 2;
+    const unsigned int channelCount = 2;
+    const unsigned int batchSize = 2;
+
+    unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    std::vector<float> input0({
+        1,  1,  1,  1,    2,  2,  2,  2,
+        3,  3,  3,  3,    4,  4,  4,  4 });
+
+    std::vector<float> input1({
+        2,  2,  2,  2,    3,  3,  3,  3,
+        4,  4,  4,  4,    5,  5,  5,  5 });
+
+    std::vector<float> output({
+        2,  2,  2,  2,    6,  6,  6,  6,
+        12, 12, 12, 12,  20, 20, 20, 20 });
+
+    return MultiplicationTestHelper(workloadFactory,
+                                    shape,
+                                    input0,
+                                    shape,
+                                    input1,
+                                    shape,
+                                    output);
+}
+
+LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int shape0[] = { 1, 2, 2, 2 };
+    std::vector<float> input0({ 1, 2, 3, 4, 5, 6, 7, 8});
+
+    unsigned int shape1[] = { 1, 1, 1, 1 };
+    std::vector<float> input1({ 2 });
+
+    std::vector<float> output({ 2, 4, 6, 8, 10, 12, 14, 16});
+
+    return MultiplicationTestHelper(workloadFactory,
+                                    shape0,
+                                    input0,
+                                    shape1,
+                                    input1,
+                                    shape0,
+                                    output);
+}
+
+LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int shape0[] = { 1, 3, 3, 2 };
+    std::vector<float> input0({
+        1,   2,      3,  4,      5,  6,
+        7,   8,      9, 10,     11, 12,
+        13, 14,     15, 16,     17, 18});
+
+    unsigned int shape1[] = { 1, 1, 1, 2 };
+    std::vector<float> input1({ 1, 2 });
+
+    std::vector<float> output({
+        1,   4,       3,  8,      5, 12,
+        7,   16,      9, 20,     11, 24,
+        13,  28,     15, 32,     17, 36});
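+    // Each (height, width) element pair is multiplied by the broadcast vector { 1, 2 },
+    // e.g. { 3, 4 } * { 1, 2 } = { 3, 8 }.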
+
+    return MultiplicationTestHelper(workloadFactory,
+                                    shape0,
+                                    input0,
+                                    shape1,
+                                    input1,
+                                    shape0,
+                                    output);
+}
+
+LayerTestResult<float,4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
+                                          armnn::IWorkloadFactory& refWorkloadFactory)
+{
+    const unsigned int width = 16;
+    const unsigned int height = 32;
+    const unsigned int channelCount = 2;
+    const unsigned int batchSize = 5;
+
+    armnn::TensorInfo inputTensorInfo0;
+    armnn::TensorInfo inputTensorInfo1;
+    armnn::TensorInfo outputTensorInfo;
+
+    constexpr unsigned int shape[] = { batchSize, channelCount, height, width };
+
+    inputTensorInfo0 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+
+    LayerTestResult<float,4> comparisonResult(outputTensorInfo);
+
+    auto input0 = MakeRandomTensor<float, 4>(inputTensorInfo0, 803506992);
+    auto input1 = MakeRandomTensor<float, 4>(inputTensorInfo1, 54902257);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle0Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo0);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1Ref = refWorkloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::MultiplicationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo0, inputHandle0.get());
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    armnn::MultiplicationQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo0, inputHandle0Ref.get());
+    SetWorkloadInput(refData, refInfo, 1, inputTensorInfo1, inputHandle1Ref.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateMultiplication(refData, refInfo);
+
+    inputHandle0->Allocate();
+    inputHandle1->Allocate();
+    outputHandle->Allocate();
+    inputHandle0Ref->Allocate();
+    inputHandle1Ref->Allocate();
+    outputHandleRef->Allocate();
+
+    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle0Ref.get(), &input0[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle1Ref.get(), &input1[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+    refWorkloadFactory.Finalize();
+    workloadRef->Execute();
+
+    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
+
+    return comparisonResult;
+}
+
+LayerTestResult<float,4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
+                                     armnn::IWorkloadFactory& refWorkloadFactory)
+{
+    const unsigned int width     = 2;
+    const unsigned int height    = 3;
+    const unsigned int channels  = 5;
+    const unsigned int batchSize = 3;
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+    armnn::TensorInfo tensorInfo;
+
+    constexpr unsigned int shape[]       = {batchSize, channels, height, width};
+    constexpr unsigned int tensorShape[] = {channels};
+
+    inputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    tensorInfo = armnn::TensorInfo(1, tensorShape, armnn::DataType::Float32);
+
+    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 21312);
+
+    auto mean     = MakeRandomTensor<float, 1>(tensorInfo, 123);
+    auto variance = MakeRandomTensor<float, 1>(tensorInfo, 234, 0.0f);
+    auto beta     = MakeRandomTensor<float, 1>(tensorInfo, 123);
+    auto gamma    = MakeRandomTensor<float, 1>(tensorInfo, 345);
+
+    LayerTestResult<float,4> ret(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandleRef  = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::BatchNormalizationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    armnn::ScopedCpuTensorHandle meanTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle varianceTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle betaTensor(tensorInfo);
+    armnn::ScopedCpuTensorHandle gammaTensor(tensorInfo);
+
+    AllocateAndCopyDataToITensorHandle(&meanTensor, &mean[0]);
+    AllocateAndCopyDataToITensorHandle(&varianceTensor, &variance[0]);
+    AllocateAndCopyDataToITensorHandle(&betaTensor, &beta[0]);
+    AllocateAndCopyDataToITensorHandle(&gammaTensor, &gamma[0]);
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Mean             = &meanTensor;
+    data.m_Variance         = &varianceTensor;
+    data.m_Beta             = &betaTensor;
+    data.m_Gamma            = &gammaTensor;
+    data.m_Parameters.m_Eps = 0.01f;
+
+    armnn::BatchNormalizationQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateBatchNormalization(data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateBatchNormalization(refData, refInfo);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    inputHandleRef->Allocate();
+    outputHandleRef->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+    refWorkloadFactory.Finalize();
+    workloadRef->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+
+    return ret;
+}
+
+template<typename T>
+void PermuteTensorData(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::PermutationVector& mappings,
+        armnn::TensorInfo & inputTensorInfo,
+        const T * inputData,
+        std::vector<T>& outputData)
+{
+    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
+    if (inputData == nullptr)
+    {
+        // Nullptr is an error in the test. By returning without doing the permutation
+        // I expect the caller to fail the test. It still makes sense to report this as
+        // an assert for Debug builds.
+        return;
+    }
+
+    armnn::TensorInfo outputTensorInfo = armnnUtils::Permuted(inputTensorInfo, mappings);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::PermuteQueueDescriptor queueDescriptor;
+    queueDescriptor.m_Parameters = armnn::PermuteDescriptor{mappings};
+    armnn::WorkloadInfo workloadInfo;
+    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(queueDescriptor, workloadInfo);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), inputData);
+
+    workload->Execute();
+
+    outputData.resize(outputTensorInfo.GetNumElements());
+    CopyDataFromITensorHandle(&outputData[0], outputHandle.get());
+    inputTensorInfo = outputTensorInfo;
+}
+
+armnn::OriginsDescriptor CreateMergerDescriptorForConcatenation(
+        const std::vector<armnn::TensorInfo> & inputTensorInfos,
+        unsigned int concatDim)
+{
+    std::vector<armnn::TensorShape> shapes;
+    shapes.reserve(inputTensorInfos.size());
+    for (const armnn::TensorInfo& it: inputTensorInfos)
+    {
+        shapes.push_back(it.GetShape());
+    }
+
+    return armnn::CreateMergerDescriptorForConcatenation(shapes.begin(),
+                                                         shapes.end(),
+                                                         concatDim);
+}
+
+//
+// Concatenation is only supported along the N and C dimensions for NCHW. For
+// tensors with fewer than 4 dimensions we need to make sure that the
+// concatenation dimension is at least the third slowest iterating one.
+//
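+// As a rough worked example of the rule below (not exercised directly by these
+// tests): concatenating 2d tensors along dimension 1 gives
+// (nDimensions - concatDim) == 1, which is < 3, so the inputs have to be
+// permuted first, whereas concatenating 4d NCHW tensors along the batch
+// dimension gives (4 - 0) >= 3 and no permute is needed.
+//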
+
+bool NeedPermuteForConcat(
+        const std::vector<armnn::TensorInfo> & inputTensorInfos,
+        unsigned int concatDim)
+{
+    // See note above. Additionally we expect the input shapes to have the
+    // same number of dimensions.
+    unsigned int nDimensions = 0;
+
+    // Determine the number of dimensions and sanity-check them
+    // against test implementation issues.
+    for (auto && tensorInfo : inputTensorInfos)
+    {
+        if (!nDimensions)
+        {
+            nDimensions = tensorInfo.GetShape().GetNumDimensions();
+        }
+        else
+        {
+            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
+                "Input shapes must have the same number of dimensions");
+        }
+    }
+
+    return (nDimensions - concatDim) < 3;
+}
+
+armnn::TensorShape ExpandTensorShapeTo3dForPermute(const armnn::TensorShape & inputShape)
+{
+    unsigned int numDims = inputShape.GetNumDimensions();
+    if (numDims >= 3)
+    {
+        // Nothing to do if the inputShape has at least 3 dimensions.
+        return inputShape;
+    }
+
+    std::vector<unsigned int> newDims(size_t(3), 1u);
+    unsigned int expandedBy = 3 - numDims;
+    for (unsigned int i = 0; i < numDims; ++i)
+    {
+        newDims[expandedBy + i] = inputShape[i];
+    }
+    return armnn::TensorShape(3u, &newDims[0]);
+}
+
+void Generate3dPermuteVectorForConcat(
+        unsigned int numDimensions,
+        unsigned int & concatDim,
+        std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutations)
+{
+    BOOST_ASSERT_MSG(numDimensions <= 3,
+       "Only dimensions 1,2 and 3 are supported by this helper");
+
+    unsigned int expandedBy = 3 - numDimensions;
+    unsigned int expandedConcatAxis = concatDim + expandedBy;
+
+    if (expandedConcatAxis == 2)
+    {
+        concatDim = 0;
+        armnn::PermutationVector forwardPermutation({1, 2, 0});
+        armnn::PermutationVector reversePermutation({2, 0, 1});
+        permutations = std::make_pair(forwardPermutation, reversePermutation);
+    }
+    else if (expandedConcatAxis == 1)
+    {
+        concatDim = 0;
+        armnn::PermutationVector forwardPermutation({2, 0, 1});
+        armnn::PermutationVector reversePermutation({1, 2, 0});
+        permutations = std::make_pair(forwardPermutation, reversePermutation);
+    }
+    else
+    {
+        BOOST_ASSERT(expandedConcatAxis == 0);
+        concatDim = 0;
+    }
+}
+
+//
+// Permutes the input tensors so that a supported concatenation can be done.
+// Tensors with fewer than 3 dimensions are treated as 3d by adding dummy
+// dimensions of size 1 at the front. Finally, this function reports what the
+// output shape of the permuted, concatenated tensor is going to be.
+//
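+// A sketch of the flow for two 2d inputs of shape [2, 3] concatenated along
+// dimension 1: each shape is first expanded to [1, 2, 3], the forward
+// permutation {1, 2, 0} turns it into [3, 1, 2] so the concatenation can run
+// along dimension 0, and the stored reverse permutation {2, 0, 1} lets
+// PermuteOutputForConcat(...) undo the rearrangement on the result afterwards.
+//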
+template <typename T>
+void PermuteInputsForConcat(
+        armnn::IWorkloadFactory& workloadFactory,
+        std::vector<armnn::TensorInfo> & inputTensorInfos,
+        std::vector<T *> & inputData,
+        std::vector<std::vector<T>> & inputDataStorage,
+        armnn::PermutationVector & permuteVector,
+        unsigned int & concatDim,
+        armnn::TensorInfo & outputTensorInfo)
+{
+    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
+        "Expecting more than one tensor to be concatenated here");
+
+    unsigned int numDims = 0;
+    unsigned int nthInput = 0;
+    const armnn::PermutationVector identity({0, 1, 2});
+
+    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutations =
+        std::make_pair(identity, identity);
+
+    inputDataStorage.resize(inputData.size());
+
+    for (auto && tensorInfo : inputTensorInfos)
+    {
+        if (numDims == 0)
+        {
+            numDims = tensorInfo.GetShape().GetNumDimensions();
+            Generate3dPermuteVectorForConcat(numDims, concatDim, permutations);
+            // Store the reverse permutation.
+            permuteVector = permutations.second;
+            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
+                "Test logic error: no permutation is needed, so we shouldn't arrive here");
+        }
+        else
+        {
+            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
+                "All inputs must have the same number of dimensions");
+        }
+
+        armnn::TensorInfo newTensorInfo = tensorInfo;
+        newTensorInfo.SetShape(ExpandTensorShapeTo3dForPermute(tensorInfo.GetShape()));
+
+        PermuteTensorData<T>(workloadFactory,
+                             permutations.first,
+                             newTensorInfo,
+                             inputData[nthInput],
+                             inputDataStorage[nthInput]);
+
+        inputData[nthInput] = inputDataStorage[nthInput].data();
+        inputTensorInfos[nthInput] = newTensorInfo;
+
+        ++nthInput;
+    }
+
+    outputTensorInfo.SetShape(
+        armnnUtils::Permuted(
+            ExpandTensorShapeTo3dForPermute(outputTensorInfo.GetShape()),
+            permutations.first));
+}
+
+
+//
+// This is the counterpart of PermuteInputsForConcat(...): it permutes the
+// output of the concatenation back so that we can check it against an
+// expected output.
+//
+template <typename T>
+void PermuteOutputForConcat(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::TensorInfo & tensorInfo,
+        const armnn::PermutationVector & permuteVector,
+        std::unique_ptr<armnn::ITensorHandle> && inputDataHandle,
+        T * data)
+{
+    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
+    if (data == nullptr)
+    {
+        // Nullptr is an error in the test. By returning without doing the permutation
+        // I expect the caller to fail the test. It still makes sense to report this as
+        // an assert for Debug builds.
+        return;
+    }
+
+    armnn::TensorInfo resultTensorInfo = tensorInfo;
+    std::vector<T> inputData(tensorInfo.GetNumElements());
+    std::vector<T> outputData;
+
+    CopyDataFromITensorHandle(&inputData[0], inputDataHandle.get());
+
+    PermuteTensorData<T>(workloadFactory,
+                         permuteVector,
+                         resultTensorInfo,
+                         &inputData[0],
+                         outputData);
+
+    ::memcpy(data, &outputData[0], sizeof(T)*outputData.size());
+}
+
+template <typename T>
+void Concatenate(armnn::IWorkloadFactory& workloadFactory,
+                 std::initializer_list<const armnn::TensorInfo> inputTensorInfosOrig,
+                 std::initializer_list<T *> inputsOrig,
+                 const armnn::TensorInfo& outputTensorInfoOrig,
+                 T * output,
+                 unsigned int concatDim)
+{
+    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
+    if (output == nullptr)
+    {
+        // Nullptr is an error in the test. By returning without doing the concatenation
+        // I expect the caller to fail the test. It still makes sense to report this as
+        // an assert for Debug builds.
+        return;
+    }
+
+    armnn::MergerQueueDescriptor queueDescriptor;
+
+    // Saves a copy of the parameters which we might need to change.
+    std::vector<armnn::TensorInfo> inputTensorInfos(inputTensorInfosOrig.begin(), inputTensorInfosOrig.end());
+    std::vector<T *> inputs            = inputsOrig;
+    armnn::TensorInfo outputTensorInfo = outputTensorInfoOrig;
+
+    armnn::PermutationVector permuteVector{0, 1, 2};
+
+    // Holds and automatically releases memory for the reshaped input data.
+    std::vector<std::vector<T>> tmpInputDataStorage;
+
+    const size_t inputCount = inputTensorInfos.size();
+
+    bool needPermuteForConcat = NeedPermuteForConcat(inputTensorInfos, concatDim);
+
+    if (needPermuteForConcat)
+    {
+        //
+        // We need to permute the inputs, because concatenation along
+        // the requested axis is not supported.
+        //
+        PermuteInputsForConcat<T>(workloadFactory,
+                                  inputTensorInfos,
+                                  inputs,
+                                  tmpInputDataStorage,
+                                  permuteVector,
+                                  concatDim,
+                                  outputTensorInfo);
+    }
+
+    armnn::OriginsDescriptor viewsDescriptor = CreateMergerDescriptorForConcatenation(inputTensorInfos, concatDim);
+
+    queueDescriptor.m_ViewOrigins.reserve(viewsDescriptor.GetNumViews());
+    for (unsigned int i = 0; i < viewsDescriptor.GetNumViews(); ++i)
+    {
+        queueDescriptor.m_ViewOrigins.emplace_back(std::vector<unsigned int>(viewsDescriptor.GetViewOrigin(i),
+            viewsDescriptor.GetViewOrigin(i) + viewsDescriptor.GetNumDimensions()));
+    }
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    std::vector<std::unique_ptr<armnn::ITensorHandle>> inputHandles;
+    inputHandles.reserve(inputCount);
+
+    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
+    for (unsigned int i = 0; i < inputCount; ++i)
+    {
+        const armnn::TensorInfo& inputTensorInfo = inputTensorInfos[i];
+
+        std::unique_ptr<armnn::ITensorHandle> inputHandle = subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo.GetShape(),
+                queueDescriptor.m_ViewOrigins[i].m_Origin.data())
+            : workloadFactory.CreateTensorHandle(inputTensorInfo);
+
+        inputHandles.emplace_back(std::move(inputHandle));
+    }
+
+    armnn::WorkloadInfo workloadInfo;
+
+    for (unsigned int i = 0; i < inputCount; ++i)
+    {
+        AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfos[i], inputHandles[i].get());
+    }
+
+    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(queueDescriptor, workloadInfo);
+
+    for (auto& inputHandle : inputHandles)
+    {
+        inputHandle->Allocate();
+    }
+
+    outputHandle->Allocate();
+
+    unsigned int nextInputId = 0;
+    for (auto& inputHandle : inputHandles)
+    {
+        CopyDataToITensorHandle(inputHandle.get(), inputs[nextInputId]);
+        ++nextInputId;
+    }
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    if (needPermuteForConcat)
+    {
+        PermuteOutputForConcat<T>(workloadFactory,
+                                  outputTensorInfo,
+                                  permuteVector,
+                                  std::move(outputHandle),
+                                  output);
+    }
+    else
+    {
+        CopyDataFromITensorHandle(output, outputHandle.get());
+    }
+}
+
+template <typename T>
+LayerTestResult<T, 1> Concatenation1dTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
+{
+    armnn::TensorInfo inputTensorInfo({ 3 }, armnn::GetDataType<T>());
+
+    auto input0 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 1.0f, 2.0f, 3.0f }));
+    auto input1 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 4.0f, 5.0f, 6.0f }));
+    auto input2 = MakeTensor<T, 1>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, { 7.0f, 8.0f, 9.0f }));
+
+    armnn::TensorInfo outputTensorInfo({ 9 }, armnn::GetDataType<T>());
+
+    LayerTestResult<T, 1> result(outputTensorInfo);
+
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory,
+        { inputTensorInfo, inputTensorInfo, inputTensorInfo },
+        { input0.data(), input1.data(), input2.data() },
+        outputTensorInfo,
+        output.data(),
+        0);
+
+    result.output = MakeTensor<T, 1>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 1>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f
+    }));
+
+    return result;
+}
+
+LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation1dTestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+template <typename T>
+LayerTestResult<T, 2> Concatenation2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
+    const armnn::TensorInfo& outputTensorInfo,
+    unsigned int dimension,
+    const float qScale,
+    const int32_t qOffset)
+{
+    armnn::TensorInfo inputTensorInfo({ 2, 3 }, armnn::GetDataType<T>());
+
+    auto input0 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f,
+
+        // Batch 1
+        10.0f, 11.0f, 12.0f,
+    }));
+
+    auto input1 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        4.0f, 5.0f, 6.0f,
+
+        // Batch 1
+        13.0f, 14.0f, 15.0f,
+    }));
+
+    auto input2 = MakeTensor<T, 2>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        7.0f, 8.0f, 9.0f,
+
+        // Batch 1
+        16.0f, 17.0f, 18.0f,
+    }));
+
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory,
+        { inputTensorInfo, inputTensorInfo, inputTensorInfo },
+        { input0.data(), input1.data(), input2.data() },
+        outputTensorInfo,
+        output.data(),
+        dimension);
+
+    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
+    return result;
+}
+
+template <typename T>
+LayerTestResult<T, 2> Concatenation2dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory,
+    float qScale, int32_t qOffset)
+{
+    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
+
+    LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 0, qScale, qOffset);
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f,
+
+        // Batch 1
+        10.0f, 11.0f, 12.0f,
+
+        // Batch 2
+        4.0f, 5.0f, 6.0f,
+
+        // Batch 3
+        13.0f, 14.0f, 15.0f,
+
+        // Batch 4
+        7.0f, 8.0f, 9.0f,
+
+        // Batch 5
+        16.0f, 17.0f, 18.0f,
+    }));
+
+    return result;
+}
+
+LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation2dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+template <typename T>
+LayerTestResult<T, 2> Concatenation2dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
+    float qScale, int32_t qOffset)
+{
+    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
+
+    LayerTestResult<T, 2> result = Concatenation2dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
+
+        // Batch 1
+        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f
+    }));
+
+    return result;
+}
+
+LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation2dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+template <typename T>
+LayerTestResult<T, 2> Concatenation2dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
+    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f,
+
+        // Batch 1
+        10.0f, 11.0f, 12.0f,
+    }));
+
+    armnn::TensorInfo input1TensorInfo({ 3, 3 }, armnn::GetDataType<T>());
+    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        4.0f, 5.0f, 6.0f,
+
+        // Batch 1
+        13.0f, 14.0f, 15.0f,
+
+        // Batch 2
+        7.0f, 8.0f, 9.0f,
+    }));
+
+    armnn::TensorInfo input2TensorInfo({ 1, 3 }, armnn::GetDataType<T>());
+    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        16.0f, 17.0f, 18.0f,
+    }));
+
+    armnn::TensorInfo outputTensorInfo({ 6, 3 }, armnn::GetDataType<T>());
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory,
+        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
+        { input0.data(), input1.data(), input2.data() },
+        outputTensorInfo,
+        output.data(),
+        0);
+
+    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f,
+
+        // Batch 1
+        10.0f, 11.0f, 12.0f,
+
+        // Batch 2
+        4.0f, 5.0f, 6.0f,
+
+        // Batch 3
+        13.0f, 14.0f, 15.0f,
+
+        // Batch 4
+        7.0f, 8.0f, 9.0f,
+
+        // Batch 5
+        16.0f, 17.0f, 18.0f,
+    }));
+
+    return result;
+}
+
+LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation2dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+template <typename T>
+LayerTestResult<T, 2> Concatenation2dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo input0TensorInfo({ 2, 3 }, armnn::GetDataType<T>());
+    auto input0 = MakeTensor<T, 2>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f,
+
+        // Batch 1
+        10.0f, 11.0f, 12.0f,
+    }));
+
+    armnn::TensorInfo input1TensorInfo({ 2, 5 }, armnn::GetDataType<T>());
+    auto input1 = MakeTensor<T, 2>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
+
+        // Batch 1
+        13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
+    }));
+
+    armnn::TensorInfo input2TensorInfo({ 2, 1 }, armnn::GetDataType<T>());
+    auto input2 = MakeTensor<T, 2>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        9.0f,
+
+        // Batch 1
+        18.0f
+    }));
+
+    armnn::TensorInfo outputTensorInfo({ 2, 9 }, armnn::GetDataType<T>());
+    LayerTestResult<T, 2> result(outputTensorInfo);
+
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory,
+        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
+        { input0.data(), input1.data(), input2.data() },
+        outputTensorInfo,
+        output.data(),
+        1);
+
+    result.output = MakeTensor<T, 2>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0
+        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f,
+
+        // Batch 1
+        10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f,
+    }));
+
+    return result;
+}
+
+LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation2dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+template <typename T>
+LayerTestResult<T, 3> Concatenation3dTestImpl(armnn::IWorkloadFactory& workloadFactory,
+    const armnn::TensorInfo& outputTensorInfo,
+    unsigned int dimension,
+    float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo inputTensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+
+    auto input0 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
+
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
+
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
+
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
+
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
+
+        // Batch 1, Channel 2
+        23.0f, 24.0f
+    }));
+
+    auto input1 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        7.0f, 8.0f,
+
+        // Batch 0, Channel 1
+        9.0f, 10.0f,
+
+        // Batch 0, Channel 2
+        11.0f, 12.0f,
+
+        // Batch 1, Channel 0
+        25.0f, 26.0f,
+
+        // Batch 1, Channel 1
+        27.0f, 28.0f,
+
+        // Batch 1, Channel 2
+        29.0f, 30.0f
+    }));
+
+    auto input2 = MakeTensor<T, 3>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        13.0f, 14.0f,
+
+        // Batch 0, Channel 1
+        15.0f, 16.0f,
+
+        // Batch 0, Channel 2
+        17.0f, 18.0f,
+
+        // Batch 1, Channel 0
+        31.0f, 32.0f,
+
+        // Batch 1, Channel 1
+        33.0f, 34.0f,
+
+        // Batch 1, Channel 2
+        35.0f, 36.0f
+    }));
+
+    LayerTestResult<T, 3> result(outputTensorInfo);
+
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory,
+        { inputTensorInfo, inputTensorInfo, inputTensorInfo },
+        { input0.data(), input1.data(), input2.data() },
+        outputTensorInfo,
+        output.data(),
+        dimension);
+
+    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
+    return result;
+}
+
+template <typename T>
+LayerTestResult<T, 3> Concatenation3dDim0TestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
+
+    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 0,
+        qScale, qOffset);
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
+
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
+
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
+
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
+
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
+
+        // Batch 1, Channel 2
+        23.0f, 24.0f,
+
+        // Batch 2, Channel 0
+        7.0f, 8.0f,
+
+        // Batch 2, Channel 1
+        9.0f, 10.0f,
+
+        // Batch 2, Channel 2
+        11.0f, 12.0f,
+
+        // Batch 3, Channel 0
+        25.0f, 26.0f,
+
+        // Batch 3, Channel 1
+        27.0f, 28.0f,
+
+        // Batch 3, Channel 2
+        29.0f, 30.0f,
+
+        // Batch 4, Channel 0
+        13.0f, 14.0f,
+
+        // Batch 4, Channel 1
+        15.0f, 16.0f,
+
+        // Batch 4, Channel 2
+        17.0f, 18.0f,
+
+        // Batch 5, Channel 0
+        31.0f, 32.0f,
+
+        // Batch 5, Channel 1
+        33.0f, 34.0f,
+
+        // Batch 5, Channel 2
+        35.0f, 36.0f
+    }));
+    return result;
+}
+
+LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation3dDim0TestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+template <typename T>
+LayerTestResult<T, 3> Concatenation3dDim1TestImpl(armnn::IWorkloadFactory& workloadFactory,
+    float qScale, int32_t qOffset)
+{
+    armnn::TensorInfo outputTensorInfo({ 2, 9, 2 }, armnn::GetDataType<T>());
+
+    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 1, qScale, qOffset);
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
+
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
+
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
+
+        // Batch 0, Channel 3
+        7.0f, 8.0f,
+
+        // Batch 0, Channel 4
+        9.0f, 10.0f,
+
+        // Batch 0, Channel 5
+        11.0f, 12.0f,
+
+        // Batch 0, Channel 6
+        13.0f, 14.0f,
+
+        // Batch 0, Channel 7
+        15.0f, 16.0f,
+
+        // Batch 0, Channel 8
+        17.0f, 18.0f,
+
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
+
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
+
+        // Batch 1, Channel 2
+        23.0f, 24.0f,
+
+        // Batch 1, Channel 3
+        25.0f, 26.0f,
+
+        // Batch 1, Channel 4
+        27.0f, 28.0f,
+
+        // Batch 1, Channel 5
+        29.0f, 30.0f,
+
+        // Batch 1, Channel 6
+        31.0f, 32.0f,
+
+        // Batch 1, Channel 7
+        33.0f, 34.0f,
+
+        // Batch 1, Channel 8
+        35.0f, 36.0f
+    }));
+
+    return result;
+}
+
+LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation3dDim1TestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+template <typename T>
+LayerTestResult<T, 3> Concatenation3dDim2TestImpl(armnn::IWorkloadFactory& workloadFactory,
+    float qScale, int32_t qOffset)
+{
+    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
+
+    LayerTestResult<T, 3> result = Concatenation3dTestImpl<T>(workloadFactory, outputTensorInfo, 2, qScale, qOffset);
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f, 7.0f, 8.0f, 13.0f, 14.0f,
+
+        // Batch 0, Channel 1
+        3.0f, 4.0f, 9.0f, 10.0f, 15.0f, 16.0f,
+
+        // Batch 0, Channel 2
+        5.0f, 6.0f, 11.0f, 12.0f, 17.0f, 18.0f,
+
+        // Batch 1, Channel 0
+        19.0f, 20.0f, 25.0f, 26.0f, 31.0f, 32.0f,
+
+        // Batch 1, Channel 1
+        21.0f, 22.0f, 27.0f, 28.0f, 33.0f, 34.0f,
+
+        // Batch 1, Channel 2
+        23.0f, 24.0f, 29.0f, 30.0f, 35.0f, 36.0f,
+    }));
+
+    return result;
+}
+
+LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation3dDim2TestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+template <typename T>
+LayerTestResult<T, 3> Concatenation3dDim0DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+            // Batch 0, Channel 0
+            1.0f, 2.0f,
+
+            // Batch 0, Channel 1
+            3.0f, 4.0f,
+
+            // Batch 0, Channel 2
+            5.0f, 6.0f,
+
+            // Batch 1, Channel 0
+            19.0f, 20.0f,
+
+            // Batch 1, Channel 1
+            21.0f, 22.0f,
+
+            // Batch 1, Channel 2
+            23.0f, 24.0f
+    }));
+
+    armnn::TensorInfo input1TensorInfo({ 1, 3, 2 }, armnn::GetDataType<T>());
+    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+            // Batch 0, Channel 0
+            7.0f, 8.0f,
+
+            // Batch 0, Channel 1
+            9.0f, 10.0f,
+
+            // Batch 0, Channel 2
+            11.0f, 12.0f,
+    }));
+
+    armnn::TensorInfo input2TensorInfo({ 3, 3, 2 }, armnn::GetDataType<T>());
+    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+            // Batch 0, Channel 0
+            25.0f, 26.0f,
+
+            // Batch 0, Channel 1
+            27.0f, 28.0f,
+
+            // Batch 0, Channel 2
+            29.0f, 30.0f,
+
+            // Batch 1, Channel 0
+            13.0f, 14.0f,
+
+            // Batch 1, Channel 1
+            15.0f, 16.0f,
+
+            // Batch 1, Channel 2
+            17.0f, 18.0f,
+
+            // Batch 2, Channel 0
+            31.0f, 32.0f,
+
+            // Batch 2, Channel 1
+            33.0f, 34.0f,
+
+            // Batch 2, Channel 2
+            35.0f, 36.0f
+    }));
+
+    armnn::TensorInfo outputTensorInfo({ 6, 3, 2 }, armnn::GetDataType<T>());
+    LayerTestResult<T, 3> result(outputTensorInfo);
+
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory,
+        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
+        { input0.data(), input1.data(), input2.data() },
+        outputTensorInfo,
+        output.data(),
+        0);
+
+    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
+
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
+
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
+
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
+
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
+
+        // Batch 1, Channel 2
+        23.0f, 24.0f,
+
+        // Batch 2, Channel 0
+        7.0f, 8.0f,
+
+        // Batch 2, Channel 1
+        9.0f, 10.0f,
+
+        // Batch 2, Channel 2
+        11.0f, 12.0f,
+
+        // Batch 3, Channel 0
+        25.0f, 26.0f,
+
+        // Batch 3, Channel 1
+        27.0f, 28.0f,
+
+        // Batch 3, Channel 2
+        29.0f, 30.0f,
+
+        // Batch 4, Channel 0
+        13.0f, 14.0f,
+
+        // Batch 4, Channel 1
+        15.0f, 16.0f,
+
+        // Batch 4, Channel 2
+        17.0f, 18.0f,
+
+        // Batch 5, Channel 0
+        31.0f, 32.0f,
+
+        // Batch 5, Channel 1
+        33.0f, 34.0f,
+
+        // Batch 5, Channel 2
+        35.0f, 36.0f
+    }));
+
+    return result;
+}
+
+LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation3dDim0DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+template <typename T>
+LayerTestResult<T, 3> Concatenation3dDim1DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
+
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
+
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
+
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
+
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
+
+        // Batch 1, Channel 2
+        23.0f, 24.0f
+    }));
+
+    armnn::TensorInfo input1TensorInfo({ 2, 4, 2 }, armnn::GetDataType<T>());
+    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        7.0f, 8.0f,
+
+        // Batch 0, Channel 1
+        9.0f, 10.0f,
+
+        // Batch 0, Channel 2
+        11.0f, 12.0f,
+
+        // Batch 0, Channel 3
+        25.0f, 26.0f,
+
+        // Batch 1, Channel 0
+        27.0f, 28.0f,
+
+        // Batch 1, Channel 1
+        29.0f, 30.0f,
+
+        // Batch 1, Channel 2
+        13.0f, 14.0f,
+
+        // Batch 1, Channel 3
+        15.0f, 16.0f,
+    }));
+
+    armnn::TensorInfo input2TensorInfo({ 2, 1, 2 }, armnn::GetDataType<T>());
+    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        17.0f, 18.0f,
+
+        // Batch 1, Channel 0
+        31.0f, 32.0f,
+    }));
+
+    armnn::TensorInfo outputTensorInfo({ 2, 8, 2 }, armnn::GetDataType<T>());
+    LayerTestResult<T, 3> result(outputTensorInfo);
+
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory,
+        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
+        { input0.data(), input1.data(), input2.data() },
+        outputTensorInfo,
+        output.data(),
+        1);
+
+    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
+
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
+
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
+
+        // Batch 0, Channel 3
+        7.0f, 8.0f,
+
+        // Batch 0, Channel 4
+        9.0f, 10.0f,
+
+        // Batch 0, Channel 5
+        11.0f, 12.0f,
+
+        // Batch 0, Channel 6
+        25.0f, 26.0f,
+
+        // Batch 0, Channel 7
+        17.0f, 18.0f,
+
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
+
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
+
+        // Batch 1, Channel 2
+        23.0f, 24.0f,
+
+        // Batch 1, Channel 3
+        27.0f, 28.0f,
+
+        // Batch 1, Channel 4
+        29.0f, 30.0f,
+
+        // Batch 1, Channel 5
+        13.0f, 14.0f,
+
+        // Batch 1, Channel 6
+        15.0f, 16.0f,
+
+        // Batch 1, Channel 7
+        31.0f, 32.0f,
+    }));
+
+    return result;
+}
+
+LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation3dDim1DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+template <typename T>
+LayerTestResult<T, 3> Concatenation3dDim2DiffInputDimsTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale,
+    int32_t qOffset)
+{
+    armnn::TensorInfo input0TensorInfo({ 2, 3, 2 }, armnn::GetDataType<T>());
+    auto input0 = MakeTensor<T, 3>(input0TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f,
+
+        // Batch 0, Channel 1
+        3.0f, 4.0f,
+
+        // Batch 0, Channel 2
+        5.0f, 6.0f,
+
+        // Batch 1, Channel 0
+        19.0f, 20.0f,
+
+        // Batch 1, Channel 1
+        21.0f, 22.0f,
+
+        // Batch 1, Channel 2
+        23.0f, 24.0f
+    }));
+
+    armnn::TensorInfo input1TensorInfo({ 2, 3, 1 }, armnn::GetDataType<T>());
+    auto input1 = MakeTensor<T, 3>(input1TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        7.0f,
+
+        // Batch 0, Channel 1
+        9.0f,
+
+        // Batch 0, Channel 2
+        11.0f,
+
+        // Batch 1, Channel 0
+        25.0f,
+
+        // Batch 1, Channel 1
+        27.0f,
+
+        // Batch 1, Channel 2
+        29.0f
+    }));
+
+    armnn::TensorInfo input2TensorInfo({ 2, 3, 3 }, armnn::GetDataType<T>());
+    auto input2 = MakeTensor<T, 3>(input2TensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        13.0f, 14.0f, 50.0f,
+
+        // Batch 0, Channel 1
+        15.0f, 16.0f, 51.0f,
+
+        // Batch 0, Channel 2
+        17.0f, 18.0f, 52.0f,
+
+        // Batch 1, Channel 0
+        31.0f, 32.0f, 53.0f,
+
+        // Batch 1, Channel 1
+        33.0f, 34.0f, 54.0f,
+
+        // Batch 1, Channel 2
+        35.0f, 36.0f, 55.0f,
+    }));
+
+    armnn::TensorInfo outputTensorInfo({ 2, 3, 6 }, armnn::GetDataType<T>());
+    LayerTestResult<T, 3> result(outputTensorInfo);
+
+    std::vector<T> output;
+    output.resize(outputTensorInfo.GetNumElements());
+    Concatenate<T>(workloadFactory,
+        { input0TensorInfo, input1TensorInfo, input2TensorInfo },
+        { input0.data(), input1.data(), input2.data() },
+        outputTensorInfo,
+        output.data(),
+        2);
+
+    result.output = MakeTensor<T, 3>(outputTensorInfo, output);
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        1.0f, 2.0f, 7.0f, 13.0f, 14.0f, 50.0f,
+
+        // Batch 0, Channel 1
+        3.0f, 4.0f, 9.0f, 15.0f, 16.0f, 51.0f,
+
+        // Batch 0, Channel 2
+        5.0f, 6.0f, 11.0f, 17.0f, 18.0f, 52.0f,
+
+        // Batch 1, Channel 0
+        19.0f, 20.0f, 25.0f, 31.0f, 32.0f, 53.0f,
+
+        // Batch 1, Channel 1
+        21.0f, 22.0f, 27.0f, 33.0f, 34.0f, 54.0f,
+
+        // Batch 1, Channel 2
+        23.0f, 24.0f, 29.0f, 35.0f, 36.0f, 55.0f,
+    }));
+
+    return result;
+}
+
+LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+
+    std::vector<float> inputData({
+        1.0f, 2.0f, 3.0f, 4.0f,
+        2.0f, 3.0f, 4.0f, 5.0f,
+        3.0f, 4.0f, 5.0f, 6.0f,
+        4.0f, 5.0f, 6.0f, 7.0f,
+
+        1.0f, 2.0f, 3.0f, 4.0f,
+        2.0f, 3.0f, 4.0f, 5.0f,
+        3.0f, 4.0f, 5.0f, 6.0f,
+        4.0f, 5.0f, 6.0f, 7.0f
+    });
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+    }
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
+
+    LayerTestResult<float, 4> result(outputTensorInfo);
+    result.outputExpected = input;
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory,
+                                                   const armnn::DataLayoutIndexed& dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 1, 1, dataLayout);
+
+    std::vector<float> inputData({
+          1.0f, 255.0f,
+        200.0f, 250.0f,
+
+        250.0f, 200.0f,
+        250.0f,   1.0f
+    });
+
+    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
+    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
+    // output texel. Thus, for an input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
+    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
+    // which we would expect if projecting the centre).
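+    // Working through the numbers for this 2x2 -> 1x1 case: the output texel's
+    // top-left corner projects onto input position (0,0), so the expected
+    // values below are simply the (0,0) entries of each channel (1.0f and
+    // 250.0f) rather than the average of the four input values.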
+
+    std::vector<float> outputData({
+          1.0f,
+
+        250.0f
+    });
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
+
+    LayerTestResult<float, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory,
+                                                  const armnn::DataLayoutIndexed& dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 4, 4, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 2, dataLayout);
+
+    std::vector<float> inputData({
+        1.0f, 2.0f, 3.0f, 4.0f,
+        2.0f, 3.0f, 4.0f, 5.0f,
+        3.0f, 4.0f, 5.0f, 6.0f,
+        4.0f, 5.0f, 6.0f, 7.0f,
+
+        7.0f, 6.0f, 5.0f, 4.0f,
+        6.0f, 5.0f, 4.0f, 3.0f,
+        5.0f, 4.0f, 3.0f, 2.0f,
+        4.0f, 3.0f, 2.0f, 1.0f
+    });
+
+    std::vector<float> outputData({
+        1.0f, 3.0f,
+        3.0f, 5.0f,
+
+        7.0f, 5.0f,
+        5.0f, 3.0f
+    });
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
+
+    LayerTestResult<float, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 2, 3, dataLayout);
+
+    std::vector<float> inputData({
+          1.0f,   2.0f,   3.0f,   5.0f,   8.0f,
+         13.0f,  21.0f,  34.0f,  55.0f,  89.0f,
+        144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
+
+        987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
+         89.0f,  55.0f,  34.0f,  21.0f,  13.0f,
+          8.0f,   5.0f,   3.0f,   2.0f,   1.0f
+    });
+
+    std::vector<float> outputData({
+          1.0f,   2.6666f,   6.00f,
+         78.5f, 179.3333f, 401.00f,
+
+        987.0f, 454.6670f, 203.33f,
+         48.5f,  22.3333f,  10.00f
+    });
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
+
+    LayerTestResult<float, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo = GetTensorInfo<float>(1, 2, 3, 2, dataLayout);
+    const armnn::TensorInfo outputTensorInfo = GetTensorInfo<float>(1, 2, 3, 5, dataLayout);
+
+    std::vector<float> inputData({
+          1.0f,   2.0f,
+         13.0f,  21.0f,
+        144.0f, 233.0f,
+
+        233.0f, 144.0f,
+         21.0f,  13.0f,
+          2.0f,   1.0f
+    });
+
+    std::vector<float> outputData({
+          1.0f,   1.4f,   1.8f,   2.0f,   2.0f,
+         13.0f,  16.2f,  19.4f,  21.0f,  21.0f,
+        144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
+
+        233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
+         21.0f,  17.8f,  14.6f,  13.0f,  13.0f,
+          2.0f,   1.6f,   1.2f,   1.0f,   1.0f
+    });
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
+
+    LayerTestResult<float, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    constexpr unsigned int width = 2;
+    constexpr unsigned int height = 3;
+
+    const armnn::TensorInfo tensorInfo({ height, width }, armnn::DataType::Float32);
+    auto input = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
+       -10.0f,  -5.0f,
+         0.0f,   5.0f,
+        10.0f,  10.0f
+    }));
+
+    LayerTestResult<float, 2> ret(tensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(tensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
+
+    armnn::FakeQuantizationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
+    float min = -10.f;
+    float max = 10.f;
+
+    data.m_Parameters.m_Min = min;
+    data.m_Parameters.m_Max = max;
+
+    armnn::PassthroughCpuTensorHandle refHandle(tensorInfo, &ret.outputExpected[0][0]);
+    armnn::FakeQuantizationQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadOutput(refData, refInfo, 0, tensorInfo, &refHandle);
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFakeQuantization(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+
+    ret.outputExpected = MakeTensor<float, 2>(tensorInfo, std::vector<float>({
+        0.0f,     63.0f,
+        128.0f,   191.0f,
+        255.0f,   255.0f
+    }));
+    return ret;
+}
+
+namespace
+{
+
+LayerTestResult<float, 4> L2NormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                  const armnn::TensorShape& inputOutputTensorShape,
+                                                  const std::vector<float>& inputValues,
+                                                  const std::vector<float>& expectedOutputValues,
+                                                  armnn::DataLayout dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
+
+    auto inputTensor = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(inputValues));
+
+    LayerTestResult<float, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>(expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::L2NormalizationQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateL2Normalization(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    return result;
+}
+
+float CalcInvL2Norm(std::initializer_list<float> elements)
+{
+    const float reduction = std::accumulate(elements.begin(), elements.end(), 0.0f,
+        [](float acc, float element) { return acc + element * element; });
+    return 1.0f / sqrtf(reduction);
+}
+
+} // anonymous namespace
+
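+// Zero-pads a 3x3 tensor to 7x7. Each entry in the PadList built below is a
+// (padding before, padding after) pair for the corresponding dimension.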
+template<typename T>
+LayerTestResult<T, 2> Pad2dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
+{
+    const armnn::TensorShape inputShape{ 3, 3 };
+    const armnn::TensorShape outputShape{ 7, 7 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
+    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
+
+    std::vector<T> inputValues(
+      QuantizedVector<T>(qScale, qOffset,
+    {
+        // Height (3) x Width (3)
+        4, 8, 6,
+        7, 4, 4,
+        3, 2, 4
+    }));
+
+    std::vector<T> expectedOutputValues(
+      QuantizedVector<T>(qScale, qOffset,
+    {
+        0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0,
+        0, 0, 4, 8, 6, 0, 0,
+        0, 0, 7, 4, 4, 0, 0,
+        0, 0, 3, 2, 4, 0, 0,
+        0, 0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0
+    }));
+
+    auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));
+
+    LayerTestResult<T, 2> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::PadQueueDescriptor descriptor;
+
+    std::vector<std::pair<unsigned int, unsigned int>> PadList;
+    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
+
+    descriptor.m_Parameters.m_PadList = PadList;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0], outputHandle.get());
+
+    return result;
+}
+
+template <typename T>
+LayerTestResult<T, 3> Pad3dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
+{
+    const armnn::TensorShape inputShape{ 2, 2, 2 };
+    const armnn::TensorShape outputShape{ 3, 5, 6 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
+    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
+
+    std::vector<T> inputValues(
+      QuantizedVector<T>(qScale, qOffset,
+    {
+        // Channel 0, Height (2) x Width (2)
+        0, 4,
+        2, 5,
+
+        // Channel 1, Height (2) x Width (2)
+        6, 1,
+        5, 2
+    }));
+
+    std::vector<T> expectedOutputValues(
+      QuantizedVector<T>(qScale, qOffset,
+    {
+
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 4, 0, 0,
+        0, 0, 2, 5, 0, 0,
+        0, 0, 0, 0, 0, 0,
+
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 6, 1, 0, 0,
+        0, 0, 5, 2, 0, 0,
+        0, 0, 0, 0, 0, 0,
+
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0
+
+    }));
+
+    auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));
+
+    LayerTestResult<T, 3> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 3>(outputTensorInfo, std::vector<T>(expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::PadQueueDescriptor descriptor;
+
+    std::vector<std::pair<unsigned int, unsigned int>> PadList;
+    PadList.push_back(std::pair<unsigned int, unsigned int>(0,1));
+    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
+    PadList.push_back(std::pair<unsigned int, unsigned int>(2,2));
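+
+    // The asymmetric (before, after) pairs of (0,1), (2,1) and (2,2) grow the 2x2x2 input to 3x5x6.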
+
+    descriptor.m_Parameters.m_PadList = PadList;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0], outputHandle.get());
+
+    return result;
+}
+
+template <typename T>
+LayerTestResult<T, 4> Pad4dTestCommon(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
+{
+    const armnn::TensorShape inputShape{ 2, 2, 3, 2 };
+    const armnn::TensorShape outputShape{ 4, 5, 7, 4 };
+
+    const armnn::TensorInfo inputTensorInfo(inputShape, armnn::GetDataType<T>());
+    const armnn::TensorInfo outputTensorInfo(outputShape, armnn::GetDataType<T>());
+
+    std::vector<T> inputValues(
+      QuantizedVector<T>(qScale, qOffset,
+    {
+        // Batch 0, Channel 0, Height (3) x Width (2)
+        0, 1,
+        2, 3,
+        4, 5,
+
+        // Batch 0, Channel 1, Height (3) x Width (2)
+        6, 7,
+        8, 9,
+        10, 11,
+
+        // Batch 1, Channel 0, Height (3) x Width (2)
+        12, 13,
+        14, 15,
+        16, 17,
+
+        // Batch 1, Channel 1, Height (3) x Width (2)
+        18, 19,
+        20, 21,
+        22, 23
+    }));
+
+    std::vector<T> expectedOutputValues(
+      QuantizedVector<T>(qScale, qOffset,
+    {
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 1, 0,
+        0, 2, 3, 0,
+        0, 4, 5, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 6, 7, 0,
+        0, 8, 9, 0,
+        0, 10, 11, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 12, 13, 0,
+        0, 14, 15, 0,
+        0, 16, 17, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 18, 19, 0,
+        0, 20, 21, 0,
+        0, 22, 23, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0,
+        0, 0, 0, 0
+    }));
+
+    auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, std::vector<T>(expectedOutputValues));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::PadQueueDescriptor descriptor;
+
+    std::vector<std::pair<unsigned int, unsigned int>> PadList;
+    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+    PadList.push_back(std::pair<unsigned int, unsigned int>(2,1));
+    PadList.push_back(std::pair<unsigned int, unsigned int>(3,1));
+    PadList.push_back(std::pair<unsigned int, unsigned int>(1,1));
+
+    descriptor.m_Parameters.m_PadList = PadList;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePad(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);
+
+    workloadFactory.Finalize();
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    return result;
+}
+
+LayerTestResult<uint8_t, 2> PadUint82dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Pad2dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
+}
+
+LayerTestResult<uint8_t, 3> PadUint83dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Pad3dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> PadUint84dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Pad4dTestCommon<uint8_t>(workloadFactory, 1.0f, 0);
+}
+
+LayerTestResult<float, 2> PadFloat322dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Pad2dTestCommon<float>(workloadFactory, 0.0f, 0);
+}
+
+LayerTestResult<float, 3> PadFloat323dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Pad3dTestCommon<float>(workloadFactory, 0.0f, 0);
+}
+
+LayerTestResult<float, 4> PadFloat324dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Pad4dTestCommon<float>(workloadFactory, 0.0f, 0);
+}
+
+LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // Width: 1
+    // Height: 1
+    // Channels: 10
+    // BatchSize: 1
+
+    const armnn::TensorShape inputOutputShape{ 1, 10, 1, 1 };
+    std::vector<float> inputValues
+    {
+        // Batch 0, Channel 0, Height (1) x Width (1)
+         1.0f,
+
+        // Batch 0, Channel 1, Height (1) x Width (1)
+         2.0f,
+
+        // Batch 0, Channel 2, Height (1) x Width (1)
+         3.0f,
+
+        // Batch 0, Channel 3, Height (1) x Width (1)
+         4.0f,
+
+        // Batch 0, Channel 4, Height (1) x Width (1)
+         5.0f,
+
+        // Batch 0, Channel 5, Height (1) x Width (1)
+         6.0f,
+
+        // Batch 0, Channel 6, Height (1) x Width (1)
+         7.0f,
+
+        // Batch 0, Channel 7, Height (1) x Width (1)
+         8.0f,
+
+        // Batch 0, Channel 8, Height (1) x Width (1)
+         9.0f,
+
+        // Batch 0, Channel 9, Height (1) x Width (1)
+        10.0f
+    };
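+    // 1 / sqrt(1^2 + 2^2 + ... + 10^2) = 1 / sqrt(385) ~= 0.050964719.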
+    const float approxInvL2Norm = 0.050964719f;
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Channel 0, Height (1) x Width (1)
+         1.0f * approxInvL2Norm,
+         2.0f * approxInvL2Norm,
+         3.0f * approxInvL2Norm,
+         4.0f * approxInvL2Norm,
+         5.0f * approxInvL2Norm,
+         6.0f * approxInvL2Norm,
+         7.0f * approxInvL2Norm,
+         8.0f * approxInvL2Norm,
+         9.0f * approxInvL2Norm,
+        10.0f * approxInvL2Norm
+    };
+
+    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> L2Normalization1dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // Width: 1
+    // Height: 1
+    // Channels: 10
+    // BatchSize: 1
+
+    const armnn::TensorShape inputOutputShape{ 1, 1, 1, 10 };
+    std::vector<float> inputValues
+    {
+        // Batch 0, Height 0, Width (1) x Channel (10)
+        1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f
+    };
+    const float approxInvL2Norm = 0.050964719f;
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Height 0, Width (1) x Channel (10)
+         1.0f * approxInvL2Norm,
+         2.0f * approxInvL2Norm,
+         3.0f * approxInvL2Norm,
+         4.0f * approxInvL2Norm,
+         5.0f * approxInvL2Norm,
+         6.0f * approxInvL2Norm,
+         7.0f * approxInvL2Norm,
+         8.0f * approxInvL2Norm,
+         9.0f * approxInvL2Norm,
+        10.0f * approxInvL2Norm
+    };
+
+    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // Width: 5
+    // Height: 1
+    // Channels: 2
+    // BatchSize: 1
+
+    const armnn::TensorShape inputOutputShape{ 1, 2, 1, 5 };
+    std::vector<float> inputValues
+    {
+        // Batch 0, Channel 0, Height (1) x Width (5)
+        1.0f, 3.0f, 5.0f, 7.0f,  9.0f,
+
+        // Batch 0, Channel 1, Height (1) x Width (5)
+        2.0f, 4.0f, 6.0f, 8.0f, 10.0f
+    };
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Channel 0, Height (1) x Width (5)
+         1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
+         3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
+         5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
+         7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
+         9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
+
+        // Batch 0, Channel 1, Height (1) x Width (5)
+         2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
+         4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
+         6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
+         8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
+        10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
+    };
+
+    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> L2Normalization2dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // Width: 5
+    // Height: 1
+    // Channels: 2
+    // BatchSize: 1
+
+    const armnn::TensorShape inputOutputShape{ 1, 1, 5, 2 };
+    std::vector<float> inputValues
+    {
+        // Batch 0, Height 0, Width (5) x Channel (2)
+        1.0f,  2.0f,
+        3.0f,  4.0f,
+        5.0f,  6.0f,
+        7.0f,  8.0f,
+        9.0f, 10.0f
+    };
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Height 0, Width (5) x Channel (2)
+        1.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
+        2.0f * CalcInvL2Norm({ 1.0f,  2.0f }),
+        3.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
+        4.0f * CalcInvL2Norm({ 3.0f,  4.0f }),
+        5.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
+        6.0f * CalcInvL2Norm({ 5.0f,  6.0f }),
+        7.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
+        8.0f * CalcInvL2Norm({ 7.0f,  8.0f }),
+        9.0f * CalcInvL2Norm({ 9.0f, 10.0f }),
+       10.0f * CalcInvL2Norm({ 9.0f, 10.0f })
+    };
+
+    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // Width: 3
+    // Height: 4
+    // Channels: 2
+    // BatchSize: 1
+
+    const armnn::TensorShape inputOutputShape{ 1, 2, 4, 3 };
+    std::vector<float> inputValues
+    {
+        // Batch 0, Channel 0, Height (4) x Width (3)
+        119.0f,  21.0f, 150.0f,
+        149.0f,  32.0f, 179.0f,
+         15.0f, 227.0f, 141.0f,
+        147.0f, 199.0f, 220.0f,
+
+        // Batch 0, Channel 1, Height (4) x Width (3)
+        110.0f, 140.0f,  73.0f,
+        211.0f, 212.0f,  89.0f,
+         24.0f, 138.0f, 188.0f,
+        162.0f,  12.0f, 161.0f
+    };
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Channel 0, Height (4) x Width (3)
+        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
+         21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
+        150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
+        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
+         32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
+        179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
+         15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
+        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
+        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
+        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
+        199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
+        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
+
+        // Batch 0, Channel 1, Height (4) x Width (3)
+        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
+        140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
+         73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
+        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
+        212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
+         89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
+         24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
+        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
+        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
+        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
+         12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
+        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
+    };
+
+    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> L2Normalization3dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // Width: 3
+    // Height: 4
+    // Channels: 2
+    // BatchSize: 1
+
+    const armnn::TensorShape inputOutputShape{ 1, 4, 3, 2 };
+    std::vector<float> inputValues
+    {
+        // Batch 0, Height 0, Width (3) x Channel (2)
+        119.0f, 110.0f,
+         21.0f, 140.0f,
+        150.0f,  73.0f,
+
+        // Batch 0, Height 1, Width (3) x Channel (2)
+        149.0f, 211.0f,
+         32.0f, 212.0f,
+        179.0f,  89.0f,
+
+        // Batch 0, Height 2, Width (3) x Channel (2)
+         15.0f,  24.0f,
+        227.0f, 138.0f,
+        141.0f, 188.0f,
+
+        // Batch 0, Height 3, Width (3) x Channel (2)
+        147.0f, 162.0f,
+        199.0f,  12.0f,
+        220.0f, 161.0f
+    };
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Height 0, Width (3) x Channel (2)
+        119.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
+        110.0f * CalcInvL2Norm({ 119.0f, 110.0f }),
+         21.0f * CalcInvL2Norm({  21.0f, 140.0f }),
+        140.0f * CalcInvL2Norm({  21.0f, 140.0f }),
+        150.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
+         73.0f * CalcInvL2Norm({ 150.0f,  73.0f }),
+
+        // Batch 0, Height 1, Width (3) x Channel (2)
+        149.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
+        211.0f * CalcInvL2Norm({ 149.0f, 211.0f }),
+         32.0f * CalcInvL2Norm({  32.0f, 212.0f }),
+        212.0f * CalcInvL2Norm({  32.0f, 212.0f }),
+        179.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
+         89.0f * CalcInvL2Norm({ 179.0f,  89.0f }),
+
+        // Batch 0, Height 2, Width (3) x Channel (2)
+         15.0f * CalcInvL2Norm({  15.0f,  24.0f }),
+         24.0f * CalcInvL2Norm({  15.0f,  24.0f }),
+        227.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
+        138.0f * CalcInvL2Norm({ 227.0f, 138.0f }),
+        141.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
+        188.0f * CalcInvL2Norm({ 141.0f, 188.0f }),
+
+        // Batch 0, Height 3, Width (3) x Channel (2)
+        147.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
+        162.0f * CalcInvL2Norm({ 147.0f, 162.0f }),
+        199.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
+         12.0f * CalcInvL2Norm({ 199.0f,  12.0f }),
+        220.0f * CalcInvL2Norm({ 220.0f, 161.0f }),
+        161.0f * CalcInvL2Norm({ 220.0f, 161.0f })
+    };
+
+    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // Width: 3
+    // Height: 4
+    // Channels: 3
+    // BatchSize: 2
+
+    const armnn::TensorShape inputOutputShape{ 2, 3, 4, 3 };
+    std::vector<float> inputValues
+    {
+        // Batch 0, Channel 0, Height (4) x Width (3)
+        235.0f,  46.0f, 178.0f,
+        100.0f, 123.0f,  19.0f,
+        172.0f,  74.0f, 250.0f,
+          6.0f, 195.0f,  80.0f,
+
+        // Batch 0, Channel 1, Height (4) x Width (3)
+        113.0f,  95.0f, 202.0f,
+         77.0f, 114.0f,  71.0f,
+        122.0f, 246.0f, 166.0f,
+         82.0f,  28.0f,  37.0f,
+
+        // Batch 0, Channel 2, Height (4) x Width (3)
+         56.0f, 170.0f, 162.0f,
+        194.0f,  89.0f, 254.0f,
+         12.0f, 209.0f, 200.0f,
+          1.0f,  64.0f,  54.0f,
+
+        // Batch 1, Channel 0, Height (4) x Width (3)
+         67.0f,  90.0f,  49.0f,
+          7.0f, 163.0f,  18.0f,
+         25.0f, 117.0f, 103.0f,
+        247.0f,  59.0f, 189.0f,
+
+        // Batch 1, Channel 1, Height (4) x Width (3)
+        239.0f, 104.0f, 199.0f,
+         17.0f, 124.0f, 153.0f,
+        222.0f, 217.0f,  75.0f,
+         32.0f, 126.0f,  21.0f,
+
+        // Batch 1, Channel 2, Height (4) x Width (3)
+         97.0f, 145.0f, 215.0f,
+        115.0f, 116.0f, 238.0f,
+        226.0f,  16.0f, 132.0f,
+         92.0f, 125.0f,  88.0f
+    };
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Channel 0, Height (4) x Width (3)
+        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
+         46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
+        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
+        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
+        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
+         19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
+        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
+         74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
+        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
+          6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
+        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
+         80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
+
+        // Batch 0, Channel 1, Height (4) x Width (3)
+        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
+         95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
+        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
+         77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
+        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
+         71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
+        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
+        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
+        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
+         82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
+         28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
+         37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
+
+        // Batch 0, Channel 2, Height (4) x Width (3)
+         56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
+        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
+        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
+        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
+         89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
+        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
+         12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
+        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
+        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
+          1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
+         64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
+         54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
+
+        // Batch 1, Channel 0, Height (4) x Width (3)
+         67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
+         90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
+         49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
+          7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
+        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
+         18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
+         25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
+        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
+        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
+        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
+         59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
+        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
+
+        // Batch 1, Channel 1, Height (4) x Width (3)
+        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
+        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
+        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
+         17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
+        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
+        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
+        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
+        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
+         75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
+         32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
+        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
+         21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
+
+        // Batch 1, Channel 2, Height (4) x Width (3)
+         97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
+        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
+        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
+        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
+        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
+        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
+        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
+         16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
+        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
+         92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
+        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
+         88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
+    };
+
+    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+                                   inputValues, expectedOutputValues, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> L2Normalization4dNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // Width: 3
+    // Height: 4
+    // Channels: 3
+    // BatchSize: 2
+
+    const armnn::TensorShape inputOutputShape{ 2, 4, 3, 3 };
+    std::vector<float> inputValues
+    {
+        // Batch 0, Height 0, Width (3) x Channel (3)
+        235.0f, 113.0f,  56.0f,
+         46.0f,  95.0f, 170.0f,
+        178.0f, 202.0f, 162.0f,
+
+        // Batch 0, Height 1, Width (3) x Channel (3)
+        100.0f,  77.0f, 194.0f,
+        123.0f, 114.0f,  89.0f,
+         19.0f,  71.0f, 254.0f,
+
+        // Batch 0, Height 2, Width (3) x Channel (3)
+        172.0f, 122.0f,  12.0f,
+         74.0f, 246.0f, 209.0f,
+        250.0f, 166.0f, 200.0f,
+
+        // Batch 0, Height 3, Width (3) x Channel (3)
+          6.0f,  82.0f,   1.0f,
+        195.0f,  28.0f,  64.0f,
+         80.0f,  37.0f,  54.0f,
+
+        // Batch 1, Height 0, Width (3) x Channel (3)
+         67.0f, 239.0f,  97.0f,
+         90.0f, 104.0f, 145.0f,
+         49.0f, 199.0f, 215.0f,
+
+        // Batch 1, Height 1, Width (3) x Channel (3)
+          7.0f,  17.0f, 115.0f,
+        163.0f, 124.0f, 116.0f,
+         18.0f, 153.0f, 238.0f,
+
+        // Batch 1, Height 2, Width (3) x Channel (3)
+         25.0f, 222.0f, 226.0f,
+        117.0f, 217.0f,  16.0f,
+        103.0f,  75.0f, 132.0f,
+
+        // Batch 1, Height 3, Width (3) x Channel (3)
+        247.0f,  32.0f,  92.0f,
+         59.0f, 126.0f, 125.0f,
+        189.0f,  21.0f,  88.0f
+    };
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Height 0, Width (3) x Channel (3)
+        235.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
+        113.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
+         56.0f * CalcInvL2Norm({ 235.0f, 113.0f,  56.0f }),
+         46.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
+         95.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
+        170.0f * CalcInvL2Norm({  46.0f,  95.0f, 170.0f }),
+        178.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
+        202.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
+        162.0f * CalcInvL2Norm({ 178.0f, 202.0f, 162.0f }),
+
+        // Batch 0, Height 1, Width (3) x Channel (3)
+        100.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
+         77.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
+        194.0f * CalcInvL2Norm({ 100.0f,  77.0f, 194.0f }),
+        123.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
+        114.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
+         89.0f * CalcInvL2Norm({ 123.0f, 114.0f,  89.0f }),
+         19.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
+         71.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
+        254.0f * CalcInvL2Norm({  19.0f,  71.0f, 254.0f }),
+
+        // Batch 0, Height 2, Width (3) x Channel (3)
+        172.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
+        122.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
+         12.0f * CalcInvL2Norm({ 172.0f, 122.0f,  12.0f }),
+         74.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
+        246.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
+        209.0f * CalcInvL2Norm({  74.0f, 246.0f, 209.0f }),
+        250.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
+        166.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
+        200.0f * CalcInvL2Norm({ 250.0f, 166.0f, 200.0f }),
+
+        // Batch 0, Height 3, Width (3) x Channel (3)
+          6.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
+         82.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
+          1.0f * CalcInvL2Norm({   6.0f,  82.0f,   1.0f }),
+        195.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
+         28.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
+         64.0f * CalcInvL2Norm({ 195.0f,  28.0f,  64.0f }),
+         80.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
+         37.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
+         54.0f * CalcInvL2Norm({  80.0f,  37.0f,  54.0f }),
+
+        // Batch 1, Height 0, Width (3) x Channel (3)
+         67.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
+        239.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
+         97.0f * CalcInvL2Norm({  67.0f, 239.0f,  97.0f }),
+         90.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
+        104.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
+        145.0f * CalcInvL2Norm({  90.0f, 104.0f, 145.0f }),
+         49.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
+        199.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
+        215.0f * CalcInvL2Norm({  49.0f, 199.0f, 215.0f }),
+
+        // Batch 1, Height 1, Width (3) x Channel (3)
+          7.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
+         17.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
+        115.0f * CalcInvL2Norm({   7.0f,  17.0f, 115.0f }),
+        163.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
+        124.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
+        116.0f * CalcInvL2Norm({ 163.0f, 124.0f, 116.0f }),
+         18.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
+        153.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
+        238.0f * CalcInvL2Norm({  18.0f, 153.0f, 238.0f }),
+
+        // Batch 1, Height 2, Width (3) x Channel (3)
+         25.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
+        222.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
+        226.0f * CalcInvL2Norm({  25.0f, 222.0f, 226.0f }),
+        117.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
+        217.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
+         16.0f * CalcInvL2Norm({ 117.0f, 217.0f,  16.0f }),
+        103.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
+         75.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
+        132.0f * CalcInvL2Norm({ 103.0f,  75.0f, 132.0f }),
+
+        // Batch 1, Height 3, Width (3) x Channel (3)
+        247.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
+         32.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
+         92.0f * CalcInvL2Norm({ 247.0f,  32.0f,  92.0f }),
+         59.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
+        126.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
+        125.0f * CalcInvL2Norm({  59.0f, 126.0f, 125.0f }),
+        189.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
+         21.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f }),
+         88.0f * CalcInvL2Norm({ 189.0f,  21.0f,  88.0f })
+    };
+
+    return L2NormalizationTestImpl(workloadFactory, inputOutputShape,
+                                   inputValues, expectedOutputValues, armnn::DataLayout::NHWC);
+}
+
+template <typename T>
+LayerTestResult<T, 4> ConstantTestImpl(armnn::IWorkloadFactory& workloadFactory,
+    float qScale,
+    int32_t qOffset)
+{
+    constexpr unsigned int inputWidth = 3;
+    constexpr unsigned int inputHeight = 4;
+    constexpr unsigned int inputChannels = 3;
+    constexpr unsigned int inputBatchSize = 2;
+
+    constexpr unsigned int outputWidth = inputWidth;
+    constexpr unsigned int outputHeight = inputHeight;
+    constexpr unsigned int outputChannels = inputChannels;
+    constexpr unsigned int outputBatchSize = inputBatchSize;
+
+    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
+        armnn::GetDataType<T>());
+
+    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
+        armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+        // Batch 0, Channel 0
+        235.0f,  46.0f, 178.0f,
+        100.0f, 123.0f,  19.0f,
+        172.0f,  74.0f, 250.0f,
+          6.0f, 195.0f,  80.0f,
+
+        // Batch 0, Channel 1
+        113.0f,  95.0f, 202.0f,
+         77.0f, 114.0f,  71.0f,
+        122.0f, 246.0f, 166.0f,
+         82.0f,  28.0f,  37.0f,
+
+        // Batch 0, Channel 2
+         56.0f, 170.0f, 162.0f,
+        194.0f,  89.0f, 254.0f,
+         12.0f, 209.0f, 200.0f,
+          1.0f,  64.0f,  54.0f,
+
+        // Batch 1, Channel 0
+         67.0f,  90.0f,  49.0f,
+          7.0f, 163.0f,  18.0f,
+         25.0f, 117.0f, 103.0f,
+        247.0f,  59.0f, 189.0f,
+
+        // Batch 1, Channel 1
+        239.0f, 104.0f, 199.0f,
+         17.0f, 124.0f, 153.0f,
+        222.0f, 217.0f,  75.0f,
+         32.0f, 126.0f,  21.0f,
+
+        // Batch 1, Channel 2
+         97.0f, 145.0f, 215.0f,
+        115.0f, 116.0f, 238.0f,
+        226.0f,  16.0f, 132.0f,
+         92.0f, 125.0f,  88.0f,
+    })));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = input;
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ScopedCpuTensorHandle constantTensor(inputTensorInfo);
+    AllocateAndCopyDataToITensorHandle(&constantTensor, &input[0][0][0][0]);
+
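+    // A Constant workload takes no inputs; it simply writes m_LayerOutput to its output tensor,
+    // which is why the expected result above is the input itself.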
+    armnn::ConstantQueueDescriptor descriptor;
+    descriptor.m_LayerOutput = &constantTensor;
+
+    armnn::WorkloadInfo info;
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateConstant(descriptor, info);
+
+    outputHandle->Allocate();
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return ConstantTestImpl<float>(workloadFactory, 0.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory)
+{
+    return ConstantTestImpl<uint8_t>(workloadFactory, 1.0f, 0);
+}
+
+LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int outputWidth = 3;
+    unsigned int outputHeight = 6;
+    unsigned int outputChannels = 3;
+
+    unsigned int inputWidth1 = 3;
+    unsigned int inputHeight1 = 6;
+    unsigned int inputChannels1 = 2;
+
+    unsigned int inputWidth2 = 3;
+    unsigned int inputHeight2 = 6;
+    unsigned int inputChannels2 = 1;
+
+    // Defines the tensor descriptors.
+    armnn::TensorInfo outputTensorInfo({ outputChannels, outputHeight, outputWidth }, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo inputTensorInfo1({ inputChannels1, inputHeight1, inputWidth1 }, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo inputTensorInfo2({ inputChannels2, inputHeight2, inputWidth2 }, armnn::DataType::QuantisedAsymm8);
+
+    // Arbitrary scale and offsets; they don't really matter because the merger operator
+    // doesn't dequantize or requantize the data.
+    const float scale = 0.13497836f;
+    const int32_t offset = -7;
+
+    outputTensorInfo.SetQuantizationScale(scale);
+    outputTensorInfo.SetQuantizationOffset(offset);
+    inputTensorInfo1.SetQuantizationScale(scale);
+    inputTensorInfo1.SetQuantizationOffset(offset);
+    inputTensorInfo2.SetQuantizationScale(scale);
+    inputTensorInfo2.SetQuantizationOffset(offset);
+
+    LayerTestResult<uint8_t, 3> ret(outputTensorInfo);
+
+    ret.outputExpected = MakeTensor<uint8_t, 3>(outputTensorInfo, std::vector<uint8_t>(
+        {
+            1, 2, 3,
+            4, 5, 6,
+            7, 8, 9,
+            10, 11, 12,
+            13, 14, 15,
+            16, 17, 18,
+
+            19, 20, 21,
+            22, 23, 24,
+            25, 26, 27,
+            28, 29, 30,
+            31, 32, 33,
+            34, 35, 36,
+
+            37, 38, 39,
+            40, 41, 42,
+            43, 44, 45,
+            46, 47, 48,
+            49, 50, 51,
+            52, 53, 54,
+        })
+    );
+
+    auto input1 = MakeTensor<uint8_t, 3>(inputTensorInfo1, std::vector<uint8_t>(
+    {
+        1, 2, 3,
+        4, 5, 6,
+        7, 8, 9,
+        10, 11, 12,
+        13, 14, 15,
+        16, 17, 18,
+
+        19, 20, 21,
+        22, 23, 24,
+        25, 26, 27,
+        28, 29, 30,
+        31, 32, 33,
+        34, 35, 36,
+    })
+    );
+
+    auto input2 = MakeTensor<uint8_t, 3>(inputTensorInfo2, std::vector<uint8_t>(
+    {
+        37, 38, 39,
+        40, 41, 42,
+        43, 44, 45,
+        46, 47, 48,
+        49, 50, 51,
+        52, 53, 54,
+    })
+    );
+
+    std::vector<unsigned int> wOrigin1 = { 0, 0, 0 }; // Extent of the window is defined by the size of input[0].
+    armnn::MergerQueueDescriptor::ViewOrigin window1(wOrigin1);
+
+    std::vector<unsigned int> wOrigin2 = { 2, 0, 0 }; // Extent of the window is defined by the size of input[1].
+    armnn::MergerQueueDescriptor::ViewOrigin window2(wOrigin2);
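+
+    // With these origins, input1 occupies channels 0 and 1 of the output and input2 occupies
+    // channel 2, i.e. the two inputs are concatenated along the channel dimension.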
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    bool subTensorsSupported = workloadFactory.SupportsSubTensors();
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 =
+        subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo1.GetShape(), wOrigin1.data()) :
+            workloadFactory.CreateTensorHandle(inputTensorInfo1);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 =
+        subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle, inputTensorInfo2.GetShape(), wOrigin2.data()) :
+            workloadFactory.CreateTensorHandle(inputTensorInfo2);
+
+    armnn::MergerQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    data.m_ViewOrigins.push_back(window1);
+    data.m_ViewOrigins.push_back(window2);
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMerger(data, info);
+
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int batchSize = 1;
+    unsigned int channels = 2;
+    unsigned int height = 2;
+    unsigned int width = 3;
+
+    const float scale = 7.0f;
+    const int32_t offset = 3;
+
+    armnn::TensorInfo inputTensorInfo1, inputTensorInfo2;
+    armnn::TensorInfo outputTensorInfo;
+
+    const unsigned int shape[] = { batchSize, channels, height, width };
+    inputTensorInfo1 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
+    inputTensorInfo1.SetQuantizationScale(scale);
+    inputTensorInfo1.SetQuantizationOffset(offset);
+
+    inputTensorInfo2 = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
+    inputTensorInfo2.SetQuantizationScale(scale);
+    inputTensorInfo2.SetQuantizationOffset(offset);
+
+    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::QuantisedAsymm8);
+    outputTensorInfo.SetQuantizationScale(scale);
+    outputTensorInfo.SetQuantizationOffset(offset);
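+
+    // A quantised value q dequantises to scale * (q - offset), e.g. 63 -> 7.0f * (63 - 3) = 420.
+    // The expected output values below are the dequantised sums re-quantised with the same
+    // parameters and clamped to [0, 255].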
+
+    // See dequantized values to the right.
+    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, std::vector<uint8_t>(
+    {
+         63,  35,  77,  70,  56, 112, //  420, 224,  518,  469,  371, 763
+        203,  28, 252, 168, 245,  91  // 1400, 175, 1743, 1155, 1694, 616
+    }));
+
+    // See dequantized values to the right.
+    auto input2 = MakeTensor<uint8_t, 4>(inputTensorInfo2, std::vector<uint8_t>(
+    {
+         21,   7, 175, 231, 175, 210, // 126,   28, 1204, 1596, 1204, 1449
+        126, 161,  63,  21, 105, 126  // 861, 1106,  420,  126,  714,  861
+    }));
+
+    // See dequantized values to the right.
+    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>(
+    {
+         81,  39, 249, 255, 228, 255, //  546,  252, 1722, 2065(clamped), 1575, 2212(clamped)
+        255, 186, 255, 186, 255, 214, // 2261(clamped), 1281, 2163(clamped), 1281, 2408(clamped), 1477
+    }));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle2 = workloadFactory.CreateTensorHandle(inputTensorInfo2);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::AdditionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo1, inputHandle1.get());
+    AddInputToWorkload(data, info, inputTensorInfo2, inputHandle2.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateAddition(data, info);
+
+    inputHandle1->Allocate();
+    inputHandle2->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle2.get(), &input2[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    return result;
+}
+
+namespace
+{
+LayerTestResult<uint8_t, 4> MultiplicationUint8TestHelper(armnn::IWorkloadFactory& workloadFactory,
+                                                          const unsigned int shape0[4],
+                                                          const std::vector<uint8_t> & values0,
+                                                          float scale0,
+                                                          int32_t offset0,
+                                                          const unsigned int shape1[4],
+                                                          const std::vector<uint8_t> & values1,
+                                                          float scale1,
+                                                          int32_t offset1,
+                                                          const unsigned int outShape[4],
+                                                          const std::vector<uint8_t> & outValues,
+                                                          float outScale,
+                                                          int32_t outOffset)
+{
+    armnn::TensorInfo inputTensorInfo0(4, shape0, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo inputTensorInfo1(4, shape1, armnn::DataType::QuantisedAsymm8);
+    armnn::TensorInfo outputTensorInfo(4, outShape, armnn::DataType::QuantisedAsymm8);
+
+    inputTensorInfo0.SetQuantizationScale(scale0);
+    inputTensorInfo0.SetQuantizationOffset(offset0);
+
+    inputTensorInfo1.SetQuantizationScale(scale1);
+    inputTensorInfo1.SetQuantizationOffset(offset1);
+
+    outputTensorInfo.SetQuantizationScale(outScale);
+    outputTensorInfo.SetQuantizationOffset(outOffset);
+
+    auto input0 = MakeTensor<uint8_t, 4>(inputTensorInfo0, values0);
+    auto input1 = MakeTensor<uint8_t, 4>(inputTensorInfo1, values1);
+
+    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, outValues);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::MultiplicationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
+    AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMultiplication(data, info);
+
+    inputHandle0->Allocate();
+    inputHandle1->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    return result;
+}
+} // anonymous namespace
+
+LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    unsigned int batchSize = 1;
+    unsigned int channels = 2;
+    unsigned int height = 2;
+    unsigned int width = 3;
+    const unsigned int shape[] = { batchSize, channels, height, width };
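+
+    // Inputs dequantise as scale * (q - offset): for input0, 62 -> 4.0f * (62 - 1) = 244; for
+    // input1, 126 -> 3.0f * (126 + 2) = 384. The expected outputs re-quantise the products with
+    // the output scale and offset, e.g. 93696 / 1366.255f - 5 ~= 64, clamping where noted.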
+
+    // See dequantized values to the right.
+    std::vector<uint8_t> input0({
+         62,  37,   3, 172,  13, 111, // 244, 144,   8, 684,  48, 440,
+        188,  20,  73,  31,  23,  31  // 748,  76, 288, 120,  88, 120
+    });
+
+    // See dequantized values to the right.
+    std::vector<uint8_t> input1({
+        126, 240, 252, 183, 121, 247, // 384, 726, 762, 555, 369, 747,
+         48, 115, 151,  79,  78,  97  // 150, 351, 459, 243, 240, 297
+    });
+
+    // See dequantized values to the right.
+    std::vector<uint8_t> output(
+    {
+         64,  72,   0, 255,   8, 236, //  93696, 104544, 6096(clamped), 379620(clamped), 17712, 328680,
+         77,  15,  92,  16,  10,  21, // 112200,  26676,        132192,           29160, 21120,  35640
+    });
+
+    return MultiplicationUint8TestHelper(workloadFactory,
+                                         shape,
+                                         input0,
+                                         4.0f,
+                                         1,
+                                         shape,
+                                         input1,
+                                         3.0f,
+                                         -2,
+                                         shape,
+                                         output,
+                                         1366.255f, // Scale/offset chosen to have output values out of range.
+                                         -5);
+}
+
+LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<uint8_t> input0({
+        1, 2, 3,    4,  5,  6,
+        7, 8, 9,   10, 11, 12
+    });
+
+    std::vector<uint8_t> input1({2});
+
+    std::vector<uint8_t> output({
+        2,  4,   6,     8, 10, 12,
+        14, 16, 18,    20, 22, 24
+    });
+
+    return MultiplicationUint8TestHelper(workloadFactory,
+                                         shape0,
+                                         input0,
+                                         1.0f,
+                                         0,
+                                         shape1,
+                                         input1,
+                                         1.0f,
+                                         0,
+                                         shape0,
+                                         output,
+                                         1.0f,
+                                         0);
+}
+
+LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 2, 2, 3 };
+    const unsigned int shape1[] = { 1, 1, 1, 3 };
+
+    std::vector<uint8_t> input0({
+        1, 2, 3,    4,  5,  6,
+        7, 8, 9,   10, 11, 12
+    });
+
+    std::vector<uint8_t> input1({1, 2, 3});
+
+    std::vector<uint8_t> output({
+        1,  4,   9,     4, 10, 18,
+        7, 16,  27,    10, 22, 36
+    });
+
+    return MultiplicationUint8TestHelper(workloadFactory,
+                                         shape0,
+                                         input0,
+                                         1.0f,
+                                         0,
+                                         shape1,
+                                         input1,
+                                         1.0f,
+                                         0,
+                                         shape0,
+                                         output,
+                                         1.0f,
+                                         0);
+}
+
+namespace
+{
+template <typename T>
+LayerTestResult<T, 4> SubtractionTestHelper(armnn::IWorkloadFactory& workloadFactory,
+                                            const unsigned int shape0[4],
+                                            const std::vector<T>& values0,
+                                            float scale0,
+                                            int32_t offset0,
+                                            const unsigned int shape1[4],
+                                            const std::vector<T> & values1,
+                                            float scale1,
+                                            int32_t offset1,
+                                            const unsigned int outShape[4],
+                                            const std::vector<T> & outValues,
+                                            float outScale,
+                                            int32_t outOffset)
+{
+    auto dataType = (std::is_same<T, uint8_t>::value ?
+                     armnn::DataType::QuantisedAsymm8 :
+                     armnn::DataType::Float32);
+
+    armnn::TensorInfo inputTensorInfo0(4, shape0, dataType);
+    armnn::TensorInfo inputTensorInfo1(4, shape1, dataType);
+    armnn::TensorInfo outputTensorInfo(4, outShape, dataType);
+
+    inputTensorInfo0.SetQuantizationScale(scale0);
+    inputTensorInfo0.SetQuantizationOffset(offset0);
+
+    inputTensorInfo1.SetQuantizationScale(scale1);
+    inputTensorInfo1.SetQuantizationOffset(offset1);
+
+    outputTensorInfo.SetQuantizationScale(outScale);
+    outputTensorInfo.SetQuantizationOffset(outOffset);
+
+    auto input0 = MakeTensor<T, 4>(inputTensorInfo0, values0);
+    auto input1 = MakeTensor<T, 4>(inputTensorInfo1, values1);
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outValues);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle0 = workloadFactory.CreateTensorHandle(inputTensorInfo0);
+    std::unique_ptr<armnn::ITensorHandle> inputHandle1 = workloadFactory.CreateTensorHandle(inputTensorInfo1);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::SubtractionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data,  info, inputTensorInfo0, inputHandle0.get());
+    AddInputToWorkload(data,  info, inputTensorInfo1, inputHandle1.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSubtraction(data, info);
+
+    inputHandle0->Allocate();
+    inputHandle1->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle0.get(), &input0[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle1.get(), &input1[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    return result;
+}
+} // anonymous namespace
+
+LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 2, 2 };
+
+    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
+    std::vector<uint8_t> input1({ 1, 2, 1, 2 });
+    std::vector<uint8_t> output({ 3, 3, 5, 5 });
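+
+    // input0 dequantises with scale 0.5f and offset 2 to { 4, 5, 6, 7 }; subtracting input1 gives
+    // { 3, 3, 5, 5 }, which matches the expected output exactly since the output scale/offset is 1.0f/0.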
+
+    return SubtractionTestHelper(workloadFactory,
+                                 shape0, input0, 0.5f, 2,
+                                 shape1, input1, 1.0f, 0,
+                                 shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
+    std::vector<uint8_t> input1({ 2 });
+    std::vector<uint8_t> output({ 5, 6, 7, 8 });
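+    // With the same real = scale * (quantized - offset) convention, input0 dequantizes to { 4, 5, 6, 7 };
+    // subtracting the broadcast value 2 gives { 2, 3, 4, 5 }, which requantizes with scale 1.0 and
+    // offset 3 to the expected { 5, 6, 7, 8 }.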
+
+    return SubtractionTestHelper(workloadFactory,
+                                 shape0, input0, 0.5f, 2,
+                                 shape1, input1, 1.0f, 0,
+                                 shape0, output, 1.0f, 3);
+}
+
+LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 2, 1 };
+
+    std::vector<uint8_t> input0({ 10, 12, 14, 16 });
+    std::vector<uint8_t> input1({ 2, 1 });
+    std::vector<uint8_t> output({ 8, 11, 12, 15 });
+
+    return SubtractionTestHelper(workloadFactory,
+                                 shape0, input0, 1.0f, 0,
+                                 shape1, input1, 1.0f, 0,
+                                 shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 2, 2 };
+
+    std::vector<float> input0({ 1,  2, 3, 4 });
+    std::vector<float> input1({ 1, -1, 0, 2 });
+    std::vector<float> output({ 0,  3, 3, 2 });
+
+    return SubtractionTestHelper(workloadFactory,
+                                 shape0, input0, 1.0f, 0,
+                                 shape1, input1, 1.0f, 0,
+                                 shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 1, 1 };
+
+    std::vector<float> input0({ 1,  2, 3, 4 });
+    std::vector<float> input1({ 10 });
+    std::vector<float> output({ -9,  -8, -7, -6 });
+
+    return SubtractionTestHelper(workloadFactory,
+                                 shape0, input0, 1.0f, 0,
+                                 shape1, input1, 1.0f, 0,
+                                 shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int shape0[] = { 1, 1, 2, 2 };
+    const unsigned int shape1[] = { 1, 1, 1, 2 };
+
+    std::vector<float> input0({ 1,  2, 3, 4 });
+    std::vector<float> input1({ 10, -5 });
+    std::vector<float> output({ -9,  7, -7, 9 });
+
+    return SubtractionTestHelper(workloadFactory,
+                                 shape0, input0, 1.0f, 0,
+                                 shape1, input1, 1.0f, 0,
+                                 shape0, output, 1.0f, 0);
+}
+
+LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    constexpr unsigned int inputWidth = 4;
+    constexpr unsigned int inputHeight = 4;
+    constexpr unsigned int inputChannels = 1;
+    constexpr unsigned int inputBatchSize = 1;
+
+    constexpr unsigned int outputWidth = inputWidth;
+    constexpr unsigned int outputHeight = inputHeight;
+    constexpr unsigned int outputChannels = inputChannels;
+    constexpr unsigned int outputBatchSize = inputBatchSize;
+
+    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
+        armnn::DataType::QuantisedAsymm8);
+    inputTensorInfo.SetQuantizationScale(1.5f);
+    inputTensorInfo.SetQuantizationOffset(-3);
+
+    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
+        armnn::DataType::QuantisedAsymm8);
+    outputTensorInfo.SetQuantizationScale(1.5f);
+    outputTensorInfo.SetQuantizationOffset(-3);
+
+    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
+        1, 2, 3, 4,
+        2, 3, 4, 5,
+        3, 4, 5, 6,
+        4, 5, 6, 7
+    }));
+
+    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
+    result.outputExpected = input;
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    constexpr unsigned int inputWidth = 2;
+    constexpr unsigned int inputHeight = 2;
+    constexpr unsigned int inputChannels = 1;
+    constexpr unsigned int inputBatchSize = 1;
+
+    constexpr unsigned int outputWidth = inputWidth / 2;
+    constexpr unsigned int outputHeight = inputHeight / 2;
+    constexpr unsigned int outputChannels = inputChannels;
+    constexpr unsigned int outputBatchSize = inputBatchSize;
+
+    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
+        armnn::DataType::QuantisedAsymm8);
+    inputTensorInfo.SetQuantizationScale(0.1567f);
+    inputTensorInfo.SetQuantizationOffset(1);
+
+    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
+        armnn::DataType::QuantisedAsymm8);
+    outputTensorInfo.SetQuantizationScale(0.1567f);
+    outputTensorInfo.SetQuantizationOffset(1);
+
+    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
+        1, 255,
+        200, 250
+    }));
+
+    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
+    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
+    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
+    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
+    // the centre).
+    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
+        1
+    }));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    constexpr unsigned int inputWidth = 4;
+    constexpr unsigned int inputHeight = 4;
+    constexpr unsigned int inputChannels = 1;
+    constexpr unsigned int inputBatchSize = 1;
+
+    constexpr unsigned int outputWidth = inputWidth / 2;
+    constexpr unsigned int outputHeight = inputHeight / 2;
+    constexpr unsigned int outputChannels = inputChannels;
+    constexpr unsigned int outputBatchSize = inputBatchSize;
+
+    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
+        armnn::DataType::QuantisedAsymm8);
+    inputTensorInfo.SetQuantizationScale(3.141592f);
+    inputTensorInfo.SetQuantizationOffset(3);
+
+    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
+        armnn::DataType::QuantisedAsymm8);
+    outputTensorInfo.SetQuantizationScale(3.141592f);
+    outputTensorInfo.SetQuantizationOffset(3);
+
+    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
+        1, 2, 3, 4,
+        2, 3, 4, 5,
+        3, 4, 5, 6,
+        4, 5, 6, 7
+    }));
+
+    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
+        1, 3,
+        3, 5
+    }));
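+    // With an exact 2x downscale in both dimensions every output sample point projects onto an input
+    // texel ((0,0), (0,2), (2,0) and (2,2)), so no interpolation occurs and the expected values are
+    // straight copies of those input elements.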
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    constexpr unsigned int inputWidth = 3;
+    constexpr unsigned int inputHeight = 2;
+    constexpr unsigned int inputChannels = 1;
+    constexpr unsigned int inputBatchSize = 1;
+
+    constexpr unsigned int outputWidth = 2;
+    constexpr unsigned int outputHeight = 1;
+    constexpr unsigned int outputChannels = inputChannels;
+    constexpr unsigned int outputBatchSize = inputBatchSize;
+
+    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
+        armnn::DataType::QuantisedAsymm8);
+    inputTensorInfo.SetQuantizationScale(1.5f);
+    inputTensorInfo.SetQuantizationOffset(-1);
+
+    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
+        armnn::DataType::QuantisedAsymm8);
+    outputTensorInfo.SetQuantizationScale(1.5f);
+    outputTensorInfo.SetQuantizationOffset(-1);
+
+    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
+        1,  2,  3, // 3.0, 4.5, 6.0
+        5,  8, 13  // 9.0, 13.5, 21.0
+    }));
+
+    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
+        1, 3 // 3.0, 5.25
+    }));
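+    // Output width 2 over input width 3 gives a horizontal scale of 1.5: output column 0 projects to
+    // input x = 0.0 (value 3.0) and column 1 to x = 1.5, halfway between 4.5 and 6.0, i.e. 5.25.
+    // The single output row projects to input y = 0.0, so only the top input row contributes.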
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    constexpr unsigned int inputWidth = 2;
+    constexpr unsigned int inputHeight = 3;
+    constexpr unsigned int inputChannels = 1;
+    constexpr unsigned int inputBatchSize = 1;
+
+    constexpr unsigned int outputWidth = 5;
+    constexpr unsigned int outputHeight = 3;
+    constexpr unsigned int outputChannels = inputChannels;
+    constexpr unsigned int outputBatchSize = inputBatchSize;
+
+    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
+        armnn::DataType::QuantisedAsymm8);
+    inputTensorInfo.SetQuantizationScale(0.010765f);
+    inputTensorInfo.SetQuantizationOffset(7);
+
+    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
+        armnn::DataType::QuantisedAsymm8);
+    outputTensorInfo.SetQuantizationScale(0.010132f);
+    outputTensorInfo.SetQuantizationOffset(-18);
+
+    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
+         24, 228, // 0.183005, 2.379065,
+        105, 128, // 1.05497, 1.302565
+        230,  71  // 2.400595, 0.68896
+    }));
+
+    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
+          0,  87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
+         86,  96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
+        219, 151,  84,  50,  50  // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
+    }));
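+    // The horizontal scale is inputWidth / outputWidth = 2 / 5 = 0.4, so the output columns sample
+    // input x = 0.0, 0.4, 0.8, 1.2 and 1.6. The last two positions are clamped to the right-hand
+    // input column, which is why the final two values repeat in every row.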
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // BatchSize: 1
+    // Channels: 2
+    // Height: 3
+    // Width: 2
+
+    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
+    std::vector<float> inputValues
+    {
+        // Batch 0, Channel 0, Height (3) x Width (2)
+         1.f, 4.f,
+         4.f, 2.f,
+         1.f, 6.f,
+
+        // Batch 0, Channel 1, Height (3) x Width (2)
+         1.f, 1.f,
+         4.f, 1.f,
+        -2.f, 4.f
+    };
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Channel 0, Height (3) x Width (2)
+        1.f, 4.f,
+        4.f, 2.f,
+        1.f, 6.f,
+
+        // Batch 0, Channel 1, Height (3) x Width (2)
+        3.f, 3.f,
+        4.f, 3.f,
+        2.f, 4.f
+    };
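+    // The expected values follow the standard batch normalization formula
+    //     y = gamma * (x - mean) / sqrt(variance + epsilon) + beta,
+    // applied per channel with the mean/variance/beta/gamma constants set up inside BatchNormTestImpl.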
+
+    return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
+                                    0.f, 0, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> BatchNormNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // BatchSize: 1
+    // Height: 3
+    // Width: 2
+    // Channels: 2
+
+    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
+    std::vector<float> inputValues
+    {
+        // Batch 0, Height 0, Width (2) x Channel (2)
+        1.f,  1.f,
+        4.f,  1.f,
+
+        // Batch 0, Height 1, Width (2) x Channel (2)
+        4.f,  4.f,
+        2.f,  1.f,
+
+        // Batch 0, Height 2, Width (2) x Channel (2)
+        1.f, -2.f,
+        6.f,  4.f
+    };
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Height 0, Width (2) x Channel (2)
+        1.f, 3.f,
+        4.f, 3.f,
+
+        // Batch 0, Height 1, Width (2) x Channel (2)
+        4.f, 4.f,
+        2.f, 3.f,
+
+        // Batch 0, Height 2, Width (2) x Channel (2)
+        1.f, 2.f,
+        6.f, 4.f
+    };
+
+    return BatchNormTestImpl<float>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
+                                    0.f, 0, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    // BatchSize: 1
+    // Channels: 2
+    // Height: 3
+    // Width: 2
+
+    const armnn::TensorShape inputOutputShape{ 1, 2, 3, 2 };
+    std::vector<float> inputValues
+    {
+        // Batch 0, Channel 0, Height (3) x Width (2)
+         1.f, 4.f,
+         4.f, 2.f,
+         1.f, 6.f,
+
+        // Batch 0, Channel 1, Height (3) x Width (2)
+         1.f, 1.f,
+         4.f, 1.f,
+        -2.f, 4.f
+    };
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Channel 0, Height (3) x Width (2)
+        1.f, 4.f,
+        4.f, 2.f,
+        1.f, 6.f,
+
+        // Batch 0, Channel 1, Height (3) x Width (2)
+        3.f, 3.f,
+        4.f, 3.f,
+        2.f, 4.f
+    };
+
+    return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
+                                      1.f/20.f, 50, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // BatchSize: 1
+    // Height: 3
+    // Width: 2
+    // Channels: 2
+
+    const armnn::TensorShape inputOutputShape{ 1, 3, 2, 2 };
+    std::vector<float> inputValues
+    {
+        // Batch 0, Height 0, Width (2) x Channel (2)
+        1.f,  1.f,
+        4.f,  1.f,
+
+        // Batch 0, Height 1, Width (2) x Channel (2)
+        4.f,  4.f,
+        2.f,  1.f,
+
+        // Batch 0, Height 2, Width (2) x Channel (2)
+        1.f, -2.f,
+        6.f,  4.f
+    };
+    std::vector<float> expectedOutputValues
+    {
+        // Batch 0, Height 0, Width (2) x Channel (2)
+        1.f, 3.f,
+        4.f, 3.f,
+
+        // Batch 0, Height 1, Width (2) x Channel (2)
+        4.f, 4.f,
+        2.f, 3.f,
+
+        // Batch 0, Height 2, Width (2) x Channel (2)
+        1.f, 2.f,
+        6.f, 4.f
+    };
+
+    return BatchNormTestImpl<uint8_t>(workloadFactory, inputOutputShape, inputValues, expectedOutputValues,
+                                      1.f/20.f, 50, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return ConstantTestImpl<uint8_t>(workloadFactory, 2e-6f, 1);
+}
+
+LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation1dTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
+}
+
+LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation2dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
+}
+
+LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation2dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
+}
+
+LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation2dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
+}
+
+LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation2dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
+}
+
+LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation3dDim0TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
+}
+
+LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation3dDim1TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
+}
+
+LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation3dDim2TestImpl<uint8_t>(workloadFactory, 0.5f, -1);
+}
+
+LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation3dDim0DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
+}
+
+LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation3dDim1DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
+}
+
+LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return Concatenation3dDim2DiffInputDimsTestImpl<uint8_t>(workloadFactory, 0.5f, -1);
+}
+
+LayerTestResult<float, 4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
+                                                                 bool forceNoPadding)
+{
+    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
+}
+
+LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                                        bool forceNoPadding)
+{
+    return SimpleMaxPooling2dSize2x2Stride2x2TestCommon<uint8_t>(workloadFactory, forceNoPadding, 3.0f, -5);
+}
+
+LayerTestResult<float, 4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
+                                                                 bool forceNoPadding)
+{
+    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<float>(workloadFactory, forceNoPadding);
+}
+
+LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                                        bool forceNoPadding)
+{
+    return SimpleMaxPooling2dSize3x3Stride2x4TestCommon<uint8_t>(workloadFactory, forceNoPadding, 0.1f, 128);
+}
+
+LayerTestResult<float, 4> SimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                 const armnn::DataLayoutIndexed& dataLayout)
+{
+    return SimpleMaxPooling2dTestCommon<float>(workloadFactory, dataLayout);
+}
+
+LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                        const armnn::DataLayoutIndexed& dataLayout)
+{
+    return SimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
+}
+
+LayerTestResult<float, 4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                     const armnn::DataLayoutIndexed& dataLayout)
+{
+    return SimpleAveragePooling2dTestCommon<float>(workloadFactory, dataLayout);
+}
+
+LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                            const armnn::DataLayoutIndexed& dataLayout)
+{
+    return SimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory, dataLayout, 0.5, -1);
+}
+
+LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
+                                                                            bool forceNoPadding)
+{
+    return IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon<float>(workloadFactory, forceNoPadding);
+}
+
+LayerTestResult<float, 4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return LargeTensorsAveragePooling2dTestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return LargeTensorsAveragePooling2dTestCommon<uint8_t>(workloadFactory, 0.5, -1);
+}
+
+LayerTestResult<float, 4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout)
+{
+    return SimpleL2Pooling2dTestCommon<float>(workloadFactory, dataLayout);
+}
+
+LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                       const armnn::DataLayoutIndexed& dataLayout)
+{
+    return SimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory, dataLayout);
+}
+
+LayerTestResult<float, 4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return L2Pooling2dSize3Stride1TestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return L2Pooling2dSize3Stride1TestCommon<uint8_t>(workloadFactory);
+}
+
+LayerTestResult<float, 4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return L2Pooling2dSize3Stride3TestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return L2Pooling2dSize3Stride3TestCommon<uint8_t>(workloadFactory);
+}
+
+LayerTestResult<float, 4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return L2Pooling2dSize3Stride4TestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return L2Pooling2dSize3Stride4TestCommon<uint8_t>(workloadFactory);
+}
+
+LayerTestResult<float, 4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return L2Pooling2dSize7TestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return L2Pooling2dSize7TestCommon<uint8_t>(workloadFactory);
+}
+
+LayerTestResult<float, 4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return L2Pooling2dSize9TestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return L2Pooling2dSize9TestCommon<uint8_t>(workloadFactory);
+}
+
+LayerTestResult<float, 4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return AsymmetricNonSquarePooling2dTestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return AsymmetricNonSquarePooling2dTestCommon<uint8_t>(workloadFactory);
+}
+
+LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                               armnn::IWorkloadFactory& refWorkloadFactory,
+                                               armnn::PoolingAlgorithm  poolingType)
+{
+    return ComparePooling2dTestCommon<float>(workloadFactory, refWorkloadFactory, poolingType);
+}
+
+LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                      armnn::IWorkloadFactory& refWorkloadFactory,
+                                                      armnn::PoolingAlgorithm  poolingType)
+{
+    return ComparePooling2dTestCommon<uint8_t>(workloadFactory, refWorkloadFactory, poolingType, 0.1f, 128);
+}
+
+LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
+                                                  bool transposeWeights)
+{
+    return FullyConnectedLargeTestCommon<float>(workloadFactory, transposeWeights);
+}
+
+LayerTestResult<float, 4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingSimpleMaxPooling2dTestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingSimpleMaxPooling2dTestCommon<uint8_t>(workloadFactory, 1.0f, -5);
+}
+
+LayerTestResult<float, 4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingMaxPooling2dSize3TestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingMaxPooling2dSize3TestCommon<uint8_t>(workloadFactory, 1.0f, -5);
+}
+
+LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingSimpleAveragePooling2dTestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingSimpleAveragePooling2dTestCommon<uint8_t>(workloadFactory);
+}
+
+LayerTestResult<float, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
+    armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon<uint8_t>(workloadFactory);
+}
+
+LayerTestResult<float, 4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingAveragePooling2dSize3TestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingAveragePooling2dSize3TestCommon<uint8_t>(workloadFactory);
+}
+
+LayerTestResult<float, 4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingSimpleL2Pooling2dTestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingSimpleL2Pooling2dTestCommon<uint8_t>(workloadFactory);
+}
+
+LayerTestResult<float, 4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingL2Pooling2dSize3TestCommon<float>(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return IgnorePaddingL2Pooling2dSize3TestCommon<uint8_t>(workloadFactory);
+}
+
+LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return SimplePermuteFloat32TestCommon(workloadFactory);
+}
+
+LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return SimplePermuteUint8TestCommon(workloadFactory);
+}
+
+LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return PermuteFloat32ValueSet1TestCommon(workloadFactory);
+}
+
+LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return PermuteFloat32ValueSet2TestCommon(workloadFactory);
+}
+
+LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    return PermuteFloat32ValueSet3TestCommon(workloadFactory);
+}
+
+namespace
+{
+
+template <typename T, std::size_t InputDim, std::size_t OutputDim>
+LayerTestResult<T, OutputDim> MeanTestHelper(armnn::IWorkloadFactory& workloadFactory,
+                                             const unsigned int* inputShape,
+                                             const std::vector<T>& inputData,
+                                             const std::vector<unsigned int>& axis,
+                                             bool keepDims,
+                                             const unsigned int* outputShape,
+                                             const std::vector<T>& outputData,
+                                             float scale = 1.0f,
+                                             int32_t offset = 0)
+{
+    auto dataType = (std::is_same<T, uint8_t>::value ? armnn::DataType::QuantisedAsymm8 : armnn::DataType::Float32);
+
+    armnn::TensorInfo inputTensorInfo(InputDim, inputShape, dataType);
+    armnn::TensorInfo outputTensorInfo(OutputDim, outputShape, dataType);
+
+    inputTensorInfo.SetQuantizationScale(scale);
+    inputTensorInfo.SetQuantizationOffset(offset);
+
+    outputTensorInfo.SetQuantizationScale(scale);
+    outputTensorInfo.SetQuantizationOffset(offset);
+
+    auto input = MakeTensor<T, InputDim>(inputTensorInfo, inputData);
+
+    LayerTestResult<T, OutputDim> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, OutputDim>(outputTensorInfo, outputData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::MeanQueueDescriptor data;
+    data.m_Parameters.m_Axis = axis;
+    data.m_Parameters.m_KeepDims = keepDims;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data,  info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateMean(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), input.origin());
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(result.output.origin(), outputHandle.get());
+
+    return result;
+}
+
+} // anonymous namespace
+
+LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = { 3, 2 };
+    const unsigned int outputShape[] = { 1 };
+
+    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
+    std::vector<uint8_t> output({ 2 });
+
+    return MeanTestHelper<uint8_t, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
+}
+
+LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = { 1, 1, 3, 2 };
+    const unsigned int outputShape[] = { 1, 1, 2 };
+
+    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
+    std::vector<uint8_t> output({ 2, 2 });
+
+    return MeanTestHelper<uint8_t, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
+}
+
+LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = { 1, 1, 3, 2 };
+    const unsigned int outputShape[] = { 1, 1, 1, 2 };
+
+    std::vector<uint8_t> input({ 1, 1, 2, 2, 3, 3 });
+    std::vector<uint8_t> output({ 2, 2 });
+
+    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
+}
+
+LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = { 2, 3, 1, 2 };
+    const unsigned int outputShape[] = { 1, 3, 1, 1 };
+
+    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6 });
+    std::vector<uint8_t> output({ 1, 3, 5 });
+
+    return MeanTestHelper<uint8_t, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
+}
+
+LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = { 4, 3, 2 };
+    const unsigned int outputShape[] = { 2 };
+
+    std::vector<uint8_t> input({ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+                                 24 });
+    std::vector<uint8_t> output({ 12, 13 });
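+    // Input and output share the same quantization parameters (scale 0.8, offset 5), so the mean can
+    // be computed directly on the quantized values: reducing over axes 0 and 1 averages the twelve
+    // elements in each column of the { 4, 3, 2 } tensor, giving 12 and 13.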
+
+    return MeanTestHelper<uint8_t, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape,
+                                         output, 0.8f, 5);
+}
+
+LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = { 3, 2 };
+    const unsigned int outputShape[] = { 1 };
+
+    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
+    std::vector<float> output({ 2.0f });
+
+    return MeanTestHelper<float, 2, 1>(workloadFactory, inputShape, input, {}, false, outputShape, output);
+}
+
+LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = { 2, 3, 1, 2 };
+    const unsigned int outputShape[] = { 3, 1, 2 };
+
+    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
+    std::vector<float> output({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
+
+    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 0 }, false, outputShape, output);
+}
+
+LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = { 1, 1, 3, 2 };
+    const unsigned int outputShape[] = { 1, 1, 1, 2 };
+
+    std::vector<float> input({ 1.0f, 1.0f, 2.0f, 2.0f, 3.0f, 3.0f });
+    std::vector<float> output({ 2.0f, 2.0f });
+
+    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 2 }, true, outputShape, output);
+}
+
+LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = { 2, 3, 1, 2 };
+    const unsigned int outputShape[] = { 1, 3, 1, 1 };
+
+    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f });
+    std::vector<float> output({ 1.5f, 3.5f, 5.5f });
+
+    return MeanTestHelper<float, 4, 4>(workloadFactory, inputShape, input, { 0, 3 }, true, outputShape, output);
+}
+
+LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = { 4, 3, 2 };
+    const unsigned int outputShape[] = { 2 };
+
+    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
+                               15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
+    std::vector<float> output({ 12.0f, 13.0f });
+
+    return MeanTestHelper<float, 3, 1>(workloadFactory, inputShape, input, { 0, 1 }, false, outputShape, output);
+}
+
+LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = { 4, 3, 2 };
+    const unsigned int outputShape[] = { 1, 3, 1 };
+
+    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
+                               15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f });
+    std::vector<float> output({ 10.5f, 12.5f, 14.5f });
+
+    return MeanTestHelper<float, 3, 3>(workloadFactory, inputShape, input, { 0, 2 }, true, outputShape, output);
+}
+
+LayerTestResult<float, 3> MeanVtsFloat3Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    const unsigned int inputShape[] = { 1, 2, 2, 1 };
+    const unsigned int outputShape[] = { 1, 2, 1 };
+
+    std::vector<float> input({ 1.0f, 2.0f, 3.0f, 4.0f });
+    std::vector<float> output({ 1.5f, 3.5f });
+
+    return MeanTestHelper<float, 4, 3>(workloadFactory, inputShape, input, { 2 }, false, outputShape, output);
+}
+
+LayerTestResult<float, 4> AdditionAfterMaxPoolTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // Create Initial Tensor
+    // 1, 2, 3
+    // 4, 5, 6
+    // 7, 8, 9
+
+    armnn::TensorInfo poolingInputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<float>());
+    armnn::TensorInfo poolingOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());
+
+    boost::multi_array<float, 4> poolingInput = MakeTensor<float,4>(poolingInputTensorInfo,
+                                                            {1, 2, 3,
+                                                             4, 5, 6,
+                                                             7, 8, 9
+                                                            });
+
+    std::unique_ptr<armnn::ITensorHandle> poolingInputHandle =
+            workloadFactory.CreateTensorHandle(poolingInputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> poolingOutputHandle =
+            workloadFactory.CreateTensorHandle(poolingOutputTensorInfo);
+
+    // Apply MaxPool poolSize = 1x1, stride=2x2
+    // Result =
+    // 1, 3
+    // 7, 9
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolHeight = 1;
+    descriptor.m_PoolWidth = 1;
+    descriptor.m_StrideX = 2;
+    descriptor.m_StrideY = 2;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+
+    armnn::Pooling2dQueueDescriptor queueDescriptor;
+    queueDescriptor.m_Parameters = descriptor;
+    armnn::WorkloadInfo workloadInfo;
+    AddInputToWorkload(queueDescriptor, workloadInfo, poolingInputTensorInfo, poolingInputHandle.get());
+    AddOutputToWorkload(queueDescriptor, workloadInfo, poolingOutputTensorInfo, poolingOutputHandle.get());
+
+    // Create the MaxPool
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
+
+    auto shape(GetTensorShapeAsArray<4>(poolingOutputTensorInfo));
+    boost::multi_array<float, 4> resultMaxPool;
+    resultMaxPool.resize(shape);
+
+    // Create an addition with another tensor of the same size. The new tensor holds the values that
+    // applying a Conv2d with a 2x2 kernel of ones and stride 1x1 to the initial tensor would produce:
+    // 12, 16
+    // 24, 28
+
+    armnn::TensorInfo addInputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());
+    armnn::TensorInfo addOutputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<float>());
+
+    boost::multi_array<float, 4> addInput = MakeTensor<float,4>(addInputTensorInfo,
+                                                                    {12, 16,
+                                                                     24, 28,
+                                                                    });
+
+    // Expected output tensor after MaxPool and Addition.
+    LayerTestResult<float, 4> addRet(addOutputTensorInfo);
+    addRet.outputExpected = MakeTensor<float, 4>(addOutputTensorInfo, std::vector<float>(
+            {
+                    13, 19,
+                    31, 37
+            }));
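+    // Sanity check: the 1x1 max pool with stride 2x2 picks { 1, 3, 7, 9 } out of the initial tensor,
+    // and adding { 12, 16, 24, 28 } element-wise gives the expected { 13, 19, 31, 37 }.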
+
+    std::unique_ptr<armnn::ITensorHandle> addInputHandle = workloadFactory.CreateTensorHandle(addInputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> addOutputHandle = workloadFactory.CreateTensorHandle(addOutputTensorInfo);
+
+    armnn::AdditionQueueDescriptor data;
+    armnn::WorkloadInfo info;
+
+    // Add the output of the MaxPool and the new tensor
+    AddInputToWorkload(data, info, poolingOutputTensorInfo, poolingOutputHandle.get());
+    AddInputToWorkload(data, info, addInputTensorInfo, addInputHandle.get());
+    AddOutputToWorkload(data, info, addOutputTensorInfo, addOutputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> addWorkload = workloadFactory.CreateAddition(data, info);
+
+    poolingInputHandle->Allocate();
+    poolingOutputHandle->Allocate();
+    addInputHandle->Allocate();
+    addOutputHandle->Allocate();
+
+    CopyDataToITensorHandle(poolingInputHandle.get(), &poolingInput[0][0][0][0]);
+    CopyDataFromITensorHandle(&resultMaxPool[0][0][0][0], poolingOutputHandle.get());
+
+    CopyDataToITensorHandle(poolingOutputHandle.get(), &resultMaxPool[0][0][0][0]);
+    CopyDataToITensorHandle(addInputHandle.get(), &addInput[0][0][0][0]);
+
+    workload->Execute();
+    addWorkload->Execute();
+
+    CopyDataFromITensorHandle(&addRet.output[0][0][0][0], addOutputHandle.get());
+
+    workloadFactory.Finalize();
+
+    return addRet;
+}
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
new file mode 100644
index 0000000..57383d3
--- /dev/null
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -0,0 +1,416 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+
+#include <Half.hpp>
+
+#include <boost/multi_array.hpp>
+#include <boost/assert.hpp>
+
+#include <array>
+
+// Layer callables.
+
+namespace armnn
+{
+class IWorkloadFactory;
+}
+
+template <std::size_t n>
+boost::array<unsigned int, n> GetTensorShapeAsArray(const armnn::TensorInfo& tensorInfo)
+{
+    BOOST_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
+        "Attempting to construct a shape array of mismatching size");
+
+    boost::array<unsigned int, n> shape;
+    for (unsigned int i = 0; i < n; i++)
+    {
+        shape[i] = tensorInfo.GetShape()[i];
+    }
+    return shape;
+}
+
+template <typename T, std::size_t n>
+struct LayerTestResult
+{
+    LayerTestResult(const armnn::TensorInfo& outputInfo)
+    {
+        auto shape( GetTensorShapeAsArray<n>(outputInfo) );
+        output.resize(shape);
+        outputExpected.resize(shape);
+        supported = true;
+    }
+
+    boost::multi_array<T, n> output;
+    boost::multi_array<T, n> outputExpected;
+    bool supported;
+};
+
+LayerTestResult<float, 4> SimpleConvolution2d3x5Test(armnn::IWorkloadFactory& workloadFactory,
+                                                     bool biasEnabled,
+                                                     const armnn::DataLayoutIndexed& layout);
+
+LayerTestResult<float, 4> SimpleConvolution2d3x3Test(armnn::IWorkloadFactory& workloadFactory,
+                                                     bool biasEnabled,
+                                                     const armnn::DataLayoutIndexed& layout);
+
+LayerTestResult<float, 4> SimpleConvolution2d3x3NhwcTest(armnn::IWorkloadFactory& workloadFactory,
+                                                         bool biasEnabled);
+
+LayerTestResult<float, 4>
+Convolution2dAsymmetricPaddingLargerThanHalfKernelSizeTest(armnn::IWorkloadFactory& workloadFactory,
+                                                           const armnn::DataLayoutIndexed& layout);
+LayerTestResult<float, 4> Convolution2dAsymmetricPaddingTest(armnn::IWorkloadFactory& workloadFactory,
+                                                             const armnn::DataLayoutIndexed& layout);
+
+
+LayerTestResult<float,   4> Convolution1dTest(armnn::IWorkloadFactory& workloadFactory,
+                                              bool biasEnabled);
+LayerTestResult<uint8_t, 4> Convolution1dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                   bool biasEnabled);
+
+LayerTestResult<float, 4> DepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                     bool biasEnabled,
+                                                     const armnn::DataLayoutIndexed& layout);
+
+LayerTestResult<float, 4> DepthwiseConvolution2dDepthNhwcTest(armnn::IWorkloadFactory& workloadFactory,
+                                                              bool biasEnabled);
+
+LayerTestResult<float, 4> DepthwiseConvolution2dDepthMul1Test(armnn::IWorkloadFactory& workloadFactory,
+                                                              bool biasEnabled,
+                                                              const armnn::DataLayoutIndexed& layout);
+
+LayerTestResult<float, 4> DepthwiseConvolution2dAsymmetricTest(armnn::IWorkloadFactory& workloadFactory,
+                                                               bool biasEnabled,
+                                                               const armnn::DataLayoutIndexed& layout);
+
+LayerTestResult<float,   4> SimpleMaxPooling2dSize2x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
+                                                                   bool forceNoPadding);
+LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize2x2Stride2x2Uint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                                        bool forceNoPadding);
+LayerTestResult<float,   4> SimpleMaxPooling2dSize3x3Stride2x4Test(armnn::IWorkloadFactory& workloadFactory,
+                                                                   bool forceNoPadding);
+LayerTestResult<uint8_t, 4> SimpleMaxPooling2dSize3x3Stride2x4Uint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                                        bool forceNoPadding );
+LayerTestResult<float,   4> IgnorePaddingSimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> IgnorePaddingSimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float,   4> IgnorePaddingMaxPooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> IgnorePaddingMaxPooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float,   4> SimpleMaxPooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                   const armnn::DataLayoutIndexed& dataLayout);
+LayerTestResult<uint8_t, 4> SimpleMaxPooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                        const armnn::DataLayoutIndexed& dataLayout);
+
+LayerTestResult<float,   4> SimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                       const armnn::DataLayoutIndexed& dataLayout);
+LayerTestResult<uint8_t, 4> SimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                            const armnn::DataLayoutIndexed& dataLayout);
+
+LayerTestResult<float,   4> IgnorePaddingAveragePooling2dSize3x2Stride2x2Test(armnn::IWorkloadFactory& workloadFactory,
+                                                                              bool forceNoPadding);
+LayerTestResult<float,   4> IgnorePaddingSimpleAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4>   IgnorePaddingSimpleAveragePooling2dNoPaddingTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test(
+    armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float,   4> IgnorePaddingAveragePooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> IgnorePaddingAveragePooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float,   4> SimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                  const armnn::DataLayoutIndexed& dataLayout);
+LayerTestResult<uint8_t, 4> SimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                       const armnn::DataLayoutIndexed& dataLayout);
+
+LayerTestResult<float,   4> L2Pooling2dSize3Stride1Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride1Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float,   4> L2Pooling2dSize3Stride3Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride3Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float,   4> L2Pooling2dSize3Stride4Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> L2Pooling2dSize3Stride4Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float,   4> L2Pooling2dSize7Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> L2Pooling2dSize7Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float,   4> L2Pooling2dSize9Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> L2Pooling2dSize9Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float,   4> LargeTensorsAveragePooling2dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> LargeTensorsAveragePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float,   4> IgnorePaddingSimpleL2Pooling2dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> IgnorePaddingSimpleL2Pooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float,   4> IgnorePaddingL2Pooling2dSize3Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> IgnorePaddingL2Pooling2dSize3Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float,   4> AsymmetricNonSquarePooling2dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> AsymmetricNonSquarePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> ComparePooling2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                               armnn::IWorkloadFactory& refWorkloadFactory,
+                                               armnn::PoolingAlgorithm  poolingType);
+LayerTestResult<uint8_t, 4> ComparePooling2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                      armnn::IWorkloadFactory& refWorkloadFactory,
+                                                      armnn::PoolingAlgorithm  poolingType);
+
+LayerTestResult<float, 4> ConstantLinearActivationTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> SimpleNormalizationAcrossTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> SimpleNormalizationWithinTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> SimpleNormalizationAcrossNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 2> SimpleSoftmaxTest(armnn::IWorkloadFactory& workloadFactory, float beta);
+LayerTestResult<uint8_t, 2> SimpleSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory, float beta);
+
+LayerTestResult<float, 4> SimpleSigmoidTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> SimpleReshapeFloat32Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> SimpleReshapeUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> SimpleFloorTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 1> Concatenation1dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 2> Concatenation2dDim0Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 2> Concatenation2dDim1Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 2> Concatenation2dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 2> Concatenation2dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 3> Concatenation3dDim0Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 3> Concatenation3dDim1Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 3> Concatenation3dDim2Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 3> Concatenation3dDim0DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 3> Concatenation3dDim1DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 3> Concatenation3dDim2DiffInputDimsTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 4> SimpleSigmoidUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> CompareConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
+    armnn::IWorkloadFactory& refWorkloadFactory);
+
+template<typename T>
+LayerTestResult<T, 4> CompareDepthwiseConvolution2dTest(armnn::IWorkloadFactory& workloadFactory,
+                                                        armnn::IWorkloadFactory& refWorkloadFactory,
+                                                        const armnn::DataLayoutIndexed& layout);
+
+LayerTestResult<float, 4> CompareNormalizationTest(armnn::IWorkloadFactory& workloadFactory,
+                                                   armnn::IWorkloadFactory& refWorkloadFactory,
+                                                   armnn::NormalizationAlgorithmChannel normChannel,
+                                                   armnn::NormalizationAlgorithmMethod normMethod);
+
+LayerTestResult<float, 2> CompareSoftmaxTest(armnn::IWorkloadFactory& workloadFactory,
+    armnn::IWorkloadFactory& refWorkloadFactory, float beta);
+
+LayerTestResult<float, 2> FullyConnectedFloat32Test(armnn::IWorkloadFactory& workloadFactory,
+                                                    bool biasEnabled,
+                                                    bool transposeWeights);
+
+std::vector<LayerTestResult<float, 3>> SplitterTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 3> CopyViaSplitterTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 3> MergerTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> AdditionTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> AdditionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> AdditionBroadcastTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> CompareAdditionTest(armnn::IWorkloadFactory& workloadFactory,
+                                              armnn::IWorkloadFactory& refWorkloadFactory);
+
+LayerTestResult<float, 4> SubtractionTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> SubtractionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> SubtractionBroadcastTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> CompareActivationTest(armnn::IWorkloadFactory&  workloadFactory,
+                                                armnn::IWorkloadFactory&  refWorkloadFactory,
+                                                armnn::ActivationFunction f,
+                                                unsigned int batchSize);
+
+LayerTestResult<float, 4> DivisionTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> DivisionByZeroTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> DivisionBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> DivisionBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> MultiplicationTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> MultiplicationBroadcast1ElementTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> MultiplicationBroadcast1DVectorTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> CompareMultiplicationTest(armnn::IWorkloadFactory& workloadFactory,
+                                                    armnn::IWorkloadFactory& refWorkloadFactory);
+
+LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> BatchNormNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> CompareBatchNormTest(armnn::IWorkloadFactory& workloadFactory,
+                                               armnn::IWorkloadFactory& refWorkloadFactory);
+
+LayerTestResult<float, 4> BoundedReLuUpperAndLowerBoundTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperAndLowerBoundTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> BoundedReLuUpperBoundOnlyTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> BoundedReLuUint8UpperBoundOnlyTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> CompareBoundedReLuTest(armnn::IWorkloadFactory& workloadFactory,
+                                                 armnn::IWorkloadFactory& refWorkloadFactory,
+                                                 float upperBound,
+                                                 float lowerBound);
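+// BoundedReLu clamps each element to the given bounds, i.e. output = min(upperBound, max(lowerBound, input));
+// the UpperBoundOnly variants presumably use a lower bound of 0 (a ReLU6-style clamp).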
+
+// Tests that the output is identical to the input when the output dimensions match the input ones.
+LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout);
+
+// Tests the behaviour of the resize bilinear operation when rescaling a 2x2 image into a 1x1 image.
+LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory,
+                                                   const armnn::DataLayoutIndexed& dataLayout);
+
+// Tests resize bilinear minification of a square input whose dimensions are an exact multiple
+// of the output dimensions.
+LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory,
+                                                  const armnn::DataLayoutIndexed& dataLayout);
+
+// Tests the resize bilinear for minification (output dimensions smaller than input dimensions).
+LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout);
+
+// Tests the resize bilinear for magnification (output dimensions bigger than input dimensions).
+LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory,
+                                                const armnn::DataLayoutIndexed& dataLayout);
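+
+// For reference (a sketch, not a statement of the exact kernel): resize bilinear maps each output
+// pixel back to input coordinates via scale = inputDim / outputDim and linearly blends the four
+// nearest input pixels; corner and rounding behaviour is left to the individual backends.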
+
+LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> L2Normalization1dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> L2Normalization2dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> L2Normalization3dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> L2Normalization4dTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> L2Normalization1dNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> L2Normalization2dNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> L2Normalization3dNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> L2Normalization4dNhwcTest(armnn::IWorkloadFactory& workloadFactory);
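+
+// L2Normalization is expected to normalise each spatial position across the channel dimension:
+//     out[n,c,h,w] = in[n,c,h,w] / sqrt(sum_over_c(in[n,c,h,w]^2))
+// (possibly guarded with a small epsilon); the Nhwc variants exercise the same maths with an
+// NHWC data layout.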
+
+LayerTestResult<float, 4> ConstantTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 4> ConstantTestUint8(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 4> BoundedReLuUint8Test(armnn::IWorkloadFactory& workloadFactory, float upperBound);
+LayerTestResult<uint8_t, 4> BoundedReLuUint8Test(armnn::IWorkloadFactory& workloadFactory,
+    float upperBound,
+    float lowerBound);
+
+LayerTestResult<uint8_t, 2> FullyConnectedUint8Test(armnn::IWorkloadFactory& workloadFactory, bool biasEnabled);
+
+std::vector<LayerTestResult<uint8_t, 3>> SplitterUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 3> CopyViaSplitterUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 3> MergerUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 4> AdditionUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> AdditionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> AdditionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 4> SubtractionUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> SubtractionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> SubtractionBroadcastUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 4> CompareActivationUint8Test(armnn::IWorkloadFactory&  workloadFactory,
+                                                       armnn::IWorkloadFactory&  refWorkloadFactory,
+                                                       armnn::ActivationFunction f);
+
+LayerTestResult<uint8_t, 2> CompareSoftmaxUint8Test(armnn::IWorkloadFactory& workloadFactory,
+    armnn::IWorkloadFactory& refWorkloadFactory,
+    float beta);
+
+LayerTestResult<uint8_t, 4> MultiplicationUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> MultiplicationBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> MultiplicationBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 4> DivisionUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> DivisionBroadcast1ElementUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> DivisionBroadcast1DVectorUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 4> SimpleConvolution2d3x5Uint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                            bool                     biasEnabled,
+                                                            const armnn::DataLayoutIndexed& layout);
+
+LayerTestResult<uint8_t, 4> SimpleConvolution2d3x3Uint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                            bool                     biasEnabled,
+                                                            const armnn::DataLayoutIndexed& layout);
+
+LayerTestResult<uint8_t, 4> DepthwiseConvolution2dUint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                            bool                     biasEnabled,
+                                                            const armnn::DataLayoutIndexed& layout);
+
+LayerTestResult<uint8_t, 4> DepthwiseConvolution2dDepthMul1Uint8Test(armnn::IWorkloadFactory& workloadFactory,
+                                                                     bool biasEnabled,
+                                                                     const armnn::DataLayoutIndexed& layout);
+
+LayerTestResult<uint8_t, 4> ConstantLinearActivationUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 4> BatchNormUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> BatchNormUint8NhwcTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 4> ConstantUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 1> Concatenation1dUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 2> Concatenation2dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 2> Concatenation2dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 2> Concatenation2dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 2> Concatenation2dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 3> Concatenation3dDim0Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 3> Concatenation3dDim1Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 3> Concatenation3dDim2Uint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 3> Concatenation3dDim0DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 3> Concatenation3dDim1DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 3> Concatenation3dDim2DiffInputDimsUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 2> FullyConnectedLargeTest(armnn::IWorkloadFactory& workloadFactory,
+                                                  bool transposeWeights);
+LayerTestResult<float, 4> SimplePermuteFloat32Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> SimplePermuteUint8Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 2> PadUint82dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 3> PadUint83dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> PadUint84dTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 2> PadFloat322dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 3> PadFloat323dTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> PadFloat324dTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> PermuteFloat32ValueSet1Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> PermuteFloat32ValueSet2Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> PermuteFloat32ValueSet3Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 2>
+    LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 2>
+    LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 2>
+    LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<float, 4> SimpleConvertFp16ToFp32Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<armnn::Half, 4> SimpleConvertFp32ToFp16Test(armnn::IWorkloadFactory& workloadFactory);
+
+LayerTestResult<uint8_t, 1> MeanUint8SimpleTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 3> MeanUint8SimpleAxisTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> MeanUint8KeepDimsTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 4> MeanUint8MultipleDimsTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<uint8_t, 1> MeanVtsUint8Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 1> MeanFloatSimpleTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 3> MeanFloatSimpleAxisTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> MeanFloatKeepDimsTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> MeanFloatMultipleDimsTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 1> MeanVtsFloat1Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 3> MeanVtsFloat2Test(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 3> MeanVtsFloat3Test(armnn::IWorkloadFactory& workloadFactory);
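+
+// The Mean tests reduce over one or more axes; the KeepDims variants retain the reduced axes as
+// size-1 dimensions, while the Vts variants appear to mirror test vectors from the Android NN
+// VTS suite.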
+
+LayerTestResult<float, 4> AdditionAfterMaxPoolTest(armnn::IWorkloadFactory& workloadFactory);
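+
+// These test functions are intended to be instantiated from the per-backend unit test suites
+// (reference, Neon, CL), typically with a helper such as
+//     ARMNN_AUTO_TEST_CASE(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f)
+// which constructs that backend's IWorkloadFactory, runs the test, and compares the returned
+// output against outputExpected.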
diff --git a/src/backends/backendsCommon/test/LstmTestImpl.hpp b/src/backends/backendsCommon/test/LstmTestImpl.hpp
new file mode 100644
index 0000000..758f294d
--- /dev/null
+++ b/src/backends/backendsCommon/test/LstmTestImpl.hpp
@@ -0,0 +1,1150 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "QuantizeHelper.hpp"
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<float, 2> LstmNoCifgNoPeepholeNoProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                                   const boost::multi_array<float, 2>& input,
+                                                                   const boost::multi_array<float, 2>& outputExpected)
+{
+    unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
+    unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
+    unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+    // The cell size and output size are the same when there is no projection.
+    unsigned int numUnits = outputSize;
+
+    armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::GetDataType<float>());
+    armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::GetDataType<float>());
+
+    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, armnn::GetDataType<float>());
+    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::GetDataType<float>());
+    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+
+    LayerTestResult<float, 2> ret(outputTensorInfo);
+
+    std::vector<float> inputVector;
+    inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
+    auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);
+
+    std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
+    auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);
+
+    std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
+    auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);
+
+    std::vector<float> scratchBufferVector(batchSize * numUnits * 3, 0.f);
+    auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
+
+    std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
+    auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
+
+    std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
+    auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
+
+    std::vector<float> outputVector;
+    outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
+    ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
+            workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
+            workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
+            workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
+            workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::LstmQueueDescriptor data;
+    armnn::WorkloadInfo info;
+
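+    // The LSTM workload takes three inputs (input, output state in, cell state in) and produces
+    // four outputs (scratch buffer, output state out, cell state out, output), in that order.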
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
+    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
+
+    AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
+    AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
+    AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    armnn::TensorInfo tensorInfo4({numUnits}, armnn::GetDataType<float>());
+    armnn::TensorInfo tensorInfo8({numUnits, 2}, armnn::GetDataType<float>());
+    armnn::TensorInfo tensorInfo16({numUnits, 4}, armnn::GetDataType<float>());
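+    // The numeric suffixes presumably give the total element count of each shape for the
+    // canonical test data (numUnits == 4, inputSize == 2): biases are [numUnits], input weights
+    // are [numUnits, inputSize] and recurrent weights are [numUnits, outputSize].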
+
+    auto inputToInputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.45018822f, -0.02338299f, -0.0870589f,
+                                                                  -0.34550029f, 0.04266912f, -0.15680569f,
+                                                                  -0.34856534f, 0.43890524f});
+
+    auto inputToForgetWeights = MakeTensor<float, 2>(tensorInfo8, {0.09701663f, 0.20334584f, -0.50592935f,
+                                                                   -0.31343272f, -0.40032279f, 0.44781327f,
+                                                                   0.01387155f, -0.35593212f});
+
+    auto inputToCellWeights = MakeTensor<float, 2>(tensorInfo8, {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f,
+                                                                 -0.20583314f, 0.44344562f, 0.22077113f,
+                                                                 -0.29909778f});
+
+    auto inputToOutputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.25065863f, -0.28290087f, 0.04613829f,
+                                                                   0.40525138f, 0.44272184f, 0.03897077f,
+                                                                   -0.1556896f, 0.19487578f});
+
+    auto recurrentToInputWeights = MakeTensor<float, 2>(tensorInfo16, {-0.0063535f, -0.2042388f, 0.31454784f,
+                                                                       -0.35746509f, 0.28902304f, 0.08183324f,
+                                                                       -0.16555229f, 0.02286911f, -0.13566875f,
+                                                                       0.03034258f, 0.48091322f, -0.12528998f,
+                                                                       0.24077177f, -0.51332325f, -0.33502164f,
+                                                                       0.10629296f});
+
+    auto recurrentToForgetWeights = MakeTensor<float, 2>(tensorInfo16, {-0.48684245f, -0.06655136f, 0.42224967f,
+                                                                        0.2112639f, 0.27654213f, 0.20864892f,
+                                                                        -0.07646349f, 0.45877004f, 0.00141793f,
+                                                                        -0.14609534f, 0.36447752f, 0.09196436f,
+                                                                        0.28053468f, 0.01560611f, -0.20127171f,
+                                                                        -0.01140004f});
+
+    auto recurrentToCellWeights = MakeTensor<float, 2>(tensorInfo16, {-0.3407414f, 0.24443203f, -0.2078532f,
+                                                                      0.26320225f, 0.05695659f, -0.00123841f,
+                                                                      -0.4744786f, -0.35869038f, -0.06418842f,
+                                                                      -0.13502428f, -0.501764f, 0.22830659f,
+                                                                      -0.46367589f, 0.26016325f, -0.03894562f,
+                                                                      -0.16368064f});
+
+    auto recurrentToOutputWeights = MakeTensor<float, 2>(tensorInfo16, {0.43385774f, -0.17194885f, 0.2718237f,
+                                                                        0.09215671f, 0.24107647f, -0.39835793f,
+                                                                        0.18212086f, 0.01301402f, 0.48572797f,
+                                                                        -0.50656658f, 0.20047462f, -0.20607421f,
+                                                                        -0.51818722f, -0.15390486f, 0.0468148f,
+                                                                        0.39922136f});
+
+    auto cellToInputWeights = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
+
+    auto inputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
+
+    auto forgetGateBias = MakeTensor<float, 1>(tensorInfo4, {1., 1., 1., 1.});
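+    // Initialising the forget gate bias to 1 is the common convention: it biases the cell
+    // towards retaining its previous state before any training has taken place.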
+
+    auto cellBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
+
+    auto outputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});
+
+    armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo8);
+    armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo8);
+    armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo8);
+    armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo8);
+    armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
+    armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
+    armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
+    armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
+    armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
+    armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
+    armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
+    armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
+    armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
+
+    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
+    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
+    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
+    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
+    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
+
+    data.m_InputToInputWeights = &inputToInputWeightsTensor;
+    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
+    data.m_InputToCellWeights = &inputToCellWeightsTensor;
+    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
+    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
+    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
+    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
+    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
+    data.m_CellToInputWeights = &cellToInputWeightsTensor;
+    data.m_InputGateBias = &inputGateBiasTensor;
+    data.m_ForgetGateBias = &forgetGateBiasTensor;
+    data.m_CellBias = &cellBiasTensor;
+    data.m_OutputGateBias = &outputGateBiasTensor;
+
+    // Flags to set test configuration
+    data.m_Parameters.m_ActivationFunc = 4;
+    data.m_Parameters.m_CifgEnabled = false;
+    data.m_Parameters.m_PeepholeEnabled = false;
+    data.m_Parameters.m_ProjectionEnabled = false;
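+
+    // For reference, with CIFG, peephole and projection all disabled the cell follows the
+    // standard LSTM equations (a sketch; the backend kernels may differ in numerical detail):
+    //     i = sigmoid(W_xi * x + W_hi * h_prev + b_i)
+    //     f = sigmoid(W_xf * x + W_hf * h_prev + b_f)
+    //     c = f * c_prev + i * g(W_xc * x + W_hc * h_prev + b_c)
+    //     o = sigmoid(W_xo * x + W_ho * h_prev + b_o)
+    //     h = o * g(c)
+    // where g is selected by m_ActivationFunc; the value 4 is assumed to mean tanh, following
+    // the TfLite/NNAPI fused-activation numbering.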
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
+    inputHandle->Allocate();
+    outputStateInHandle->Allocate();
+    cellStateInHandle->Allocate();
+
+    scratchHandle->Allocate();
+    outputStateOutHandle->Allocate();
+    cellStateOutHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
+    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+
+    return ret;
+}
+
+LayerTestResult<float, 2>
+LstmLayerFloat32NoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                         const boost::multi_array<float, 2>& input,
+                                                         const boost::multi_array<float, 2>& outputExpected)
+{
+    unsigned int batchSize = 2;
+    unsigned int outputSize = 16;
+    unsigned int inputSize = 5;
+    unsigned int numUnits = 20;
+
+    armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::GetDataType<float>());
+    armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::GetDataType<float>());
+
+    // Scratch buffer size without CIFG [batchSize, numUnits * 3]
+    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 3}, armnn::GetDataType<float>());
+    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, armnn::GetDataType<float>());
+    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+
+    LayerTestResult<float, 2> ret(outputTensorInfo);
+
+    std::vector<float> inputVector;
+    inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
+    auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);
+
+    std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
+    auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);
+
+    std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
+    auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);
+
+    std::vector<float> scratchBufferVector(batchSize * numUnits * 3, 0.f);
+    auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
+
+    std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
+    auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
+
+    std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
+    auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
+
+    std::vector<float> outputVector;
+    outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
+    ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
+            workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
+            workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
+            workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
+            workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::LstmQueueDescriptor data;
+    armnn::WorkloadInfo info;
+
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
+    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
+
+    AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
+    AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
+    AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    armnn::TensorInfo tensorInfo16({outputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo tensorInfo20({numUnits}, armnn::GetDataType<float>());
+    armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, armnn::GetDataType<float>());
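+    // tensorInfo16x20 ([outputSize, numUnits]) is presumably the shape of the projection
+    // weights, which map the numUnits-wide cell output back down to outputSize.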
+
+    auto inputToInputWeights =
+            MakeTensor<float, 2>(tensorInfo20x5, {0.021393683f,0.06124551f,  0.046905167f,-0.014657677f,-0.03149463f,
+                                                  0.09171803f, 0.14647801f,0.10797193f,   -0.0057968358f,0.0019193048f,
+                                                  -0.2726754f, 0.10154029f, -0.018539885f, 0.080349885f, -0.10262385f,
+                                                  -0.022599787f,-0.09121155f, -0.008675967f, -0.045206103f,-0.0821282f,
+                                                  -0.008045952f,0.015478081f, 0.055217247f,  0.038719587f, 0.044153627f,
+                                                  -0.06453243f,0.05031825f, -0.046935108f, -0.008164439f, 0.014574226f,
+                                                  -0.1671009f,   -0.15519552f, -0.16819797f,-0.13971269f,-0.11953059f,
+                                                  0.25005487f, -0.22790983f, 0.009855087f,  -0.028140958f, -0.11200698f,
+                                                  0.11295408f, -0.0035217577f, 0.054485075f,  0.05184695f, 0.064711206f,
+                                                  0.10989193f,   0.11674786f,  0.03490607f, 0.07727357f, 0.11390585f,
+                                                  -0.1863375f,  -0.1034451f, -0.13945189f, -0.049401227f, -0.18767063f,
+                                                  0.042483903f, 0.14233552f, 0.13832581f, 0.18350165f,    0.14545603f,
+                                                  -0.028545704f,0.024939531f,0.050929718f,0.0076203286f,-0.0029723682f,
+                                                  -0.042484224f, -0.11827596f, -0.09171104f,  -0.10808628f,-0.16327988f,
+                                                  -0.2273378f,   -0.0993647f, -0.017155107f,0.0023917493f,0.049272764f,
+                                                  0.0038534778f, 0.054764505f,   0.089753784f, 0.06947234f, 0.08014476f,
+                                                  -0.04544234f, -0.0497073f,-0.07135631f,  -0.048929106f,-0.004042012f,
+                                                  -0.009284026f, 0.018042054f, 0.0036860977f,-0.07427302f, -0.11434604f,
+                                                  -0.018995456f, 0.031487543f, 0.012834908f,0.019977754f,0.044256654f,
+                                                  -0.39292613f,  -0.18519334f, -0.11651281f,-0.06809892f, 0.011373677f
+            });
+
+    auto inputToForgetWeights =
+            MakeTensor<float, 2>(tensorInfo20x5, {-0.0018401089f, -0.004852237f,0.03698424f, 0.014181704f,0.028273236f,
+                                                   -0.016726194f, -0.05249759f,-0.10204261f, 0.00861066f,-0.040979505f,
+                                                   -0.009899187f,0.01923892f,-0.028177269f, -0.08535103f,-0.14585495f,
+                                                   0.10662567f,-0.01909731f,-0.017883534f,-0.0047269356f,-0.045103323f,
+                                                   0.0030784295f,0.076784775f,0.07463696f, 0.094531395f,0.0814421f,
+                                                   -0.12257899f, -0.033945758f,-0.031303465f, 0.045630626f,0.06843887f,
+                                                   -0.13492945f, -0.012480007f,-0.0811829f, -0.07224499f,-0.09628791f,
+                                                   0.045100946f,0.0012300825f, 0.013964662f, 0.099372394f,0.02543059f,
+                                                   0.06958324f,    0.034257296f, 0.0482646f, 0.06267997f,0.052625068f,
+                                                   0.12784666f,    0.07077897f,  0.025725935f, 0.04165009f,0.07241905f,
+                                                   0.018668644f, -0.037377294f,-0.06277783f,-0.08833636f,-0.040120605f,
+                                                   -0.011405586f,-0.007808335f,-0.010301386f,-0.005102167f,0.027717464f,
+                                                   0.05483423f, 0.11449111f, 0.11289652f,0.10939839f, 0.13396506f,
+                                                   -0.08402166f,-0.01901462f,  -0.044678304f,-0.07720565f,0.014350063f,
+                                                   -0.11757958f, -0.0652038f, -0.08185733f,-0.076754324f,-0.092614375f,
+                                                   0.10405491f, 0.052960336f, 0.035755895f,0.035839386f,-0.012540553f,
+                                                   0.036881298f,   0.02913376f,  0.03420159f,0.05448447f,-0.054523353f,
+                                                   0.02582715f, 0.02327355f, -0.011857179f,-0.0011980024f,-0.034641717f,
+                                                   -0.026125094f,-0.17582615f,-0.15923657f,-0.27486774f,-0.0006143371f,
+                                                   0.0001771948f,  -8.470171e-05f, 0.02651807f,0.045790765f,0.06956496f
+            });
+
+    auto inputToCellWeights =
+            MakeTensor<float, 2>(tensorInfo20x5, {-0.04580283f,   -0.09549462f,   -0.032418985f,  -0.06454633f,
+                                                  -0.043528453f,  0.043018587f,   -0.049152344f,  -0.12418144f,
+                                                  -0.078985475f,  -0.07596889f,   0.019484362f,   -0.11434962f,
+                                                  -0.0074034138f, -0.06314844f,   -0.092981495f,  0.0062155537f,
+                                                  -0.025034338f,  -0.0028890965f, 0.048929527f,   0.06235075f,
+                                                  0.10665918f,    -0.032036792f,  -0.08505916f,   -0.10843358f,
+                                                  -0.13002433f,   -0.036816437f,  -0.02130134f,   -0.016518239f,
+                                                  0.0047691227f,  -0.0025825808f, 0.066017866f,   0.029991534f,
+                                                  -0.10652836f,   -0.1037554f,    -0.13056071f,   -0.03266643f,
+                                                  -0.033702414f,  -0.006473424f,  -0.04611692f,   0.014419339f,
+                                                  -0.025174323f,  0.0396852f,     0.081777506f,   0.06157468f,
+                                                  0.10210095f,    -0.009658194f,  0.046511717f,   0.03603906f,
+                                                  0.0069369148f,  0.015960095f,   -0.06507666f,   0.09551598f,
+                                                  0.053568836f,   0.06408714f,    0.12835667f,    -0.008714329f,
+                                                  -0.20211966f,   -0.12093674f,   0.029450472f,   0.2849013f,
+                                                  -0.029227901f,  0.1164364f,     -0.08560263f,   0.09941786f,
+                                                  -0.036999565f,  -0.028842626f,  -0.0033637602f, -0.017012902f,
+                                                  -0.09720865f,   -0.11193351f,   -0.029155117f,  -0.017936034f,
+                                                  -0.009768936f,  -0.04223324f,   -0.036159635f,  0.06505112f,
+                                                  -0.021742892f,  -0.023377212f,  -0.07221364f,   -0.06430552f,
+                                                  0.05453865f,    0.091149814f,   0.06387331f,    0.007518393f,
+                                                  0.055960953f,   0.069779344f,   0.046411168f,   0.10509911f,
+                                                  0.07463894f,    0.0075130584f,  0.012850982f,   0.04555431f,
+                                                  0.056955688f,   0.06555285f,    0.050801456f,   -0.009862683f,
+                                                  0.00826772f,    -0.026555609f,  -0.0073611983f, -0.0014897042f
+            });
+
+    auto inputToOutputWeights =
+            MakeTensor<float, 2>(tensorInfo20x5, {-0.0998932f,   -0.07201956f, -0.052803773f,-0.15629593f,-0.15001918f,
+                                                  -0.07650751f,0.02359855f, -0.075155355f, -0.08037709f,  -0.15093534f,
+                                                  0.029517552f, -0.04751393f, 0.010350531f,-0.02664851f, -0.016839722f,
+                                                  -0.023121163f, 0.0077019283f, 0.012851257f, -0.05040649f,-0.0129761f,
+                                                  -0.021737747f,-0.038305793f,-0.06870586f, -0.01481247f,-0.001285394f,
+                                                  0.10124236f,  0.083122835f, 0.053313006f,-0.062235646f,-0.075637154f,
+                                                  -0.027833903f, 0.029774971f,  0.1130802f, 0.09218906f, 0.09506135f,
+                                                  -0.086665764f,-0.037162706f,-0.038880914f,-0.035832845f,-0.014481564f,
+                                                  -0.09825003f,-0.12048569f,-0.097665586f,-0.05287633f, -0.0964047f,
+                                                  -0.11366429f,  0.035777505f,  0.13568819f, 0.052451383f,0.050649304f,
+                                                  0.05798951f, -0.021852335f,-0.099848844f,0.014740475f,-0.078897946f,
+                                                  0.04974699f, 0.014160473f,  0.06973932f,    0.04964942f, 0.033364646f,
+                                                  0.08190124f,   0.025535367f, 0.050893165f, 0.048514254f,0.06945813f,
+                                                  -0.078907564f,-0.06707616f,  -0.11844508f, -0.09986688f,-0.07509403f,
+                                                  0.06263226f,   0.14925587f,   0.20188436f, 0.12098451f,0.14639415f,
+                                                  0.0015017595f, -0.014267382f, -0.03417257f,0.012711468f,0.0028300495f,
+                                                  -0.024758482f, -0.05098548f,-0.0821182f, 0.014225672f,  0.021544158f,
+                                                  0.08949725f,  0.07505268f, -0.0020780868f, 0.04908258f,0.06476295f,
+                                                  -0.022907063f,0.027562456f,0.040185735f, 0.019567577f,-0.015598739f,
+                                                  -0.049097303f, -0.017121866f, -0.083368234f,-0.02332002f,-0.0840956f
+            });
+
+    auto inputGateBias =
+            MakeTensor<float, 1>(tensorInfo20, {0.02234832f,  0.14757581f,   0.18176508f,  0.10380666f,  0.053110216f,
+                                                -0.06928846f, -0.13942584f,  -0.11816189f, 0.19483899f,  0.03652339f,
+                                                -0.10250295f, 0.036714908f,  -0.18426876f, 0.036065217f, 0.21810818f,
+                                                0.02383196f,  -0.043370757f, 0.08690144f,  -0.04444982f, 0.00030581196f
+            });
+
+    auto forgetGateBias =
+            MakeTensor<float, 1>(tensorInfo20, {0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f,
+                                                0.11098921f,  0.15378423f,   0.09263801f,  0.09790885f,
+                                                0.09508917f,  0.061199076f,  0.07665568f,  -0.015443159f,
+                                                -0.03499149f, 0.046190713f,  0.08895977f,  0.10899629f,
+                                                0.40694186f,  0.06030037f,   0.012413437f, -0.06108739f
+            });
+
+    auto cellBias =
+            MakeTensor<float, 1>(tensorInfo20, {-0.024379363f, 0.0055531194f, 0.23377132f,   0.033463873f,
+                                                -0.1483596f,   -0.10639995f,  -0.091433935f, 0.058573797f,
+                                                -0.06809782f,  -0.07889636f,  -0.043246906f, -0.09829136f,
+                                                -0.4279842f,   0.034901652f,  0.18797937f,   0.0075234566f,
+                                                0.016178843f,  0.1749513f,    0.13975595f,   0.92058027f
+            });
+
+    auto outputGateBias =
+            MakeTensor<float, 1>(tensorInfo20, {0.046159424f,  -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
+                                                0.35373217f,   -0.018957434f,  0.008907322f, -0.0762701f, 0.12018895f,
+                                                0.04216877f,   0.0022856654f,  0.040952638f,  0.3147856f,  0.08225149f,
+                                                -0.057416286f, -0.14995944f,   -0.008040261f, 0.13208859f, 0.029760877f
+            });
+
+    auto recurrentToInputWeights =
+            MakeTensor<float, 2>(tensorInfo20x16, {-0.001374326f,   -0.078856036f,   0.10672688f,    0.029162422f,
+                                                   -0.11585556f,    0.02557986f,     -0.13446963f,   -0.035785314f,
+                                                   -0.01244275f,    0.025961924f,    -0.02337298f,   -0.044228926f,
+                                                   -0.055839065f,   -0.046598054f,   -0.010546039f,  -0.06900766f,
+                                                   0.027239809f,    0.022582639f,    -0.013296484f,  -0.05459212f,
+                                                   0.08981f,        -0.045407712f,   0.08682226f,    -0.06867011f,
+                                                   -0.14390695f,    -0.02916037f,    0.000996957f,   0.091420636f,
+                                                   0.14283475f,     -0.07390571f,    -0.06402044f,   0.062524505f,
+                                                   -0.093129106f,   0.04860203f,     -0.08364217f,   -0.08119002f,
+                                                   0.009352075f,    0.22920375f,     0.0016303885f,  0.11583097f,
+                                                   -0.13732095f,    0.012405723f,    -0.07551853f,   0.06343048f,
+                                                   0.12162708f,     -0.031923793f,   -0.014335606f,  0.01790974f,
+                                                   -0.10650317f,    -0.0724401f,     0.08554849f,    -0.05727212f,
+                                                   0.06556731f,     -0.042729504f,   -0.043227166f,  0.011683251f,
+                                                   -0.013082158f,   -0.029302018f,   -0.010899579f,  -0.062036745f,
+                                                   -0.022509435f,   -0.00964907f,    -0.01567329f,   0.04260106f,
+                                                   -0.07787477f,    -0.11576462f,    0.017356863f,   0.048673786f,
+                                                   -0.017577527f,   -0.05527947f,    -0.082487635f,  -0.040137455f,
+                                                   -0.10820036f,    -0.04666372f,    0.022746278f,   -0.07851417f,
+                                                   0.01068115f,     0.032956902f,    0.022433773f,   0.0026891115f,
+                                                   0.08944216f,     -0.0685835f,     0.010513544f,   0.07228705f,
+                                                   0.02032331f,     -0.059686817f,   -0.0005566496f, -0.086984694f,
+                                                   0.040414046f,    -0.1380399f,     0.094208956f,   -0.05722982f,
+                                                   0.012092817f,    -0.04989123f,    -0.086576f,     -0.003399834f,
+                                                   -0.04696032f,    -0.045747425f,   0.10091314f,    0.048676282f,
+                                                   -0.029037097f,   0.031399418f,    -0.0040285117f, 0.047237843f,
+                                                   0.09504992f,     0.041799378f,    -0.049185462f,  -0.031518843f,
+                                                   -0.10516937f,    0.026374253f,    0.10058866f,    -0.0033195973f,
+                                                   -0.041975245f,   0.0073591834f,   0.0033782164f,  -0.004325073f,
+                                                   -0.10167381f,    0.042500053f,    -0.01447153f,   0.06464186f,
+                                                   -0.017142897f,   0.03312627f,     0.009205989f,   0.024138335f,
+                                                   -0.011337001f,   0.035530265f,    -0.010912711f,  0.0706555f,
+                                                   -0.005894094f,   0.051841937f,    -0.1401738f,    -0.02351249f,
+                                                   0.0365468f,      0.07590991f,     0.08838724f,    0.021681072f,
+                                                   -0.10086113f,    0.019608743f,    -0.06195883f,   0.077335775f,
+                                                   0.023646897f,    -0.095322326f,   0.02233014f,    0.09756986f,
+                                                   -0.048691444f,   -0.009579111f,   0.07595467f,    0.11480546f,
+                                                   -0.09801813f,    0.019894179f,    0.08502348f,    0.004032281f,
+                                                   0.037211012f,    0.068537936f,    -0.048005626f,  -0.091520436f,
+                                                   -0.028379958f,   -0.01556313f,    0.06554592f,    -0.045599163f,
+                                                   -0.01672207f,    -0.020169014f,   -0.011877351f,  -0.20212261f,
+                                                   0.010889619f,    0.0047078193f,   0.038385306f,   0.08540671f,
+                                                   -0.017140968f,   -0.0035865551f,  0.016678626f,   0.005633034f,
+                                                   0.015963363f,    0.00871737f,     0.060130805f,   0.028611384f,
+                                                   0.10109069f,     -0.015060172f,   -0.07894427f,   0.06401885f,
+                                                   0.011584063f,    -0.024466386f,   0.0047652307f,  -0.09041358f,
+                                                   0.030737216f,    -0.0046374933f,  0.14215417f,    -0.11823516f,
+                                                   0.019899689f,    0.006106124f,    -0.027092824f,  0.0786356f,
+                                                   0.05052217f,     -0.058925f,      -0.011402121f,  -0.024987547f,
+                                                   -0.0013661642f,  -0.06832946f,    -0.015667673f,  -0.1083353f,
+                                                   -0.00096863037f, -0.06988685f,    -0.053350925f,  -0.027275559f,
+                                                   -0.033664223f,   -0.07978348f,    -0.025200296f,  -0.017207067f,
+                                                   -0.058403496f,   -0.055697463f,   0.005798788f,   0.12965427f,
+                                                   -0.062582195f,   0.0013350133f,   -0.10482091f,   0.0379771f,
+                                                   0.072521195f,    -0.0029455067f,  -0.13797039f,   -0.03628521f,
+                                                   0.013806405f,    -0.017858358f,   -0.01008298f,   -0.07700066f,
+                                                   -0.017081132f,   0.019358726f,    0.0027079724f,  0.004635139f,
+                                                   0.062634714f,    -0.02338735f,    -0.039547626f,  -0.02050681f,
+                                                   0.03385117f,     -0.083611414f,   0.002862572f,   -0.09421313f,
+                                                   0.058618143f,    -0.08598433f,    0.00972939f,    0.023867095f,
+                                                   -0.053934585f,   -0.023203006f,   0.07452513f,    -0.048767887f,
+                                                   -0.07314807f,    -0.056307215f,   -0.10433547f,   -0.06440842f,
+                                                   0.04328182f,     0.04389765f,     -0.020006588f,  -0.09076438f,
+                                                   -0.11652589f,    -0.021705797f,   0.03345259f,    -0.010329105f,
+                                                   -0.025767034f,   0.013057034f,    -0.07316461f,   -0.10145612f,
+                                                   0.06358255f,     0.18531723f,     0.07759293f,    0.12006465f,
+                                                   0.1305557f,      0.058638252f,    -0.03393652f,   0.09622831f,
+                                                   -0.16253184f,    -2.4580743e-06f, 0.079869635f,   -0.070196845f,
+                                                   -0.005644518f,   0.06857898f,     -0.12598175f,   -0.035084512f,
+                                                   0.03156317f,     -0.12794146f,    -0.031963028f,  0.04692781f,
+                                                   0.030070418f,    0.0071660685f,   -0.095516115f,  -0.004643372f,
+                                                   0.040170413f,    -0.062104587f,   -0.0037324072f, 0.0554317f,
+                                                   0.08184801f,     -0.019164372f,   0.06791302f,    0.034257166f,
+                                                   -0.10307039f,    0.021943003f,    0.046745934f,   0.0790918f,
+                                                   -0.0265588f,     -0.007824208f,   0.042546265f,   -0.00977924f,
+                                                   -0.0002440307f,  -0.017384544f,   -0.017990116f,  0.12252321f,
+                                                   -0.014512694f,   -0.08251313f,    0.08861942f,    0.13589665f,
+                                                   0.026351685f,    0.012641483f,    0.07466548f,    0.044301085f,
+                                                   -0.045414884f,   -0.051112458f,   0.03444247f,    -0.08502782f,
+                                                   -0.04106223f,    -0.028126027f,   0.028473156f,   0.10467447f
+            });
+
+    auto recurrentToForgetWeights =
+            MakeTensor<float, 2>(tensorInfo20x16, {-0.057784554f,  -0.026057621f,  -0.068447545f,   -0.022581743f,
+                                                   0.14811787f,    0.10826372f,    0.09471067f,     0.03987225f,
+                                                   -0.0039523416f, 0.00030638507f, 0.053185795f,    0.10572994f,
+                                                   0.08414449f,    -0.022036452f,  -0.00066928595f, -0.09203576f,
+                                                   0.032950465f,   -0.10985798f,   -0.023809856f,   0.0021431844f,
+                                                   -0.02196096f,   -0.00326074f,   0.00058621005f,  -0.074678116f,
+                                                   -0.06193199f,   0.055729095f,   0.03736828f,     0.020123724f,
+                                                   0.061878487f,   -0.04729229f,   0.034919553f,    -0.07585433f,
+                                                   -0.04421272f,   -0.044019096f,  0.085488975f,    0.04058006f,
+                                                   -0.06890133f,   -0.030951202f,  -0.024628663f,   -0.07672815f,
+                                                   0.034293607f,   0.08556707f,    -0.05293577f,    -0.033561368f,
+                                                   -0.04899627f,   0.0241671f,     0.015736353f,    -0.095442444f,
+                                                   -0.029564252f,  0.016493602f,   -0.035026584f,   0.022337519f,
+                                                   -0.026871363f,  0.004780428f,   0.0077918363f,   -0.03601621f,
+                                                   0.016435321f,   -0.03263031f,   -0.09543275f,    -0.047392778f,
+                                                   0.013454138f,   0.028934088f,   0.01685226f,     -0.086110644f,
+                                                   -0.046250615f,  -0.01847454f,   0.047608484f,    0.07339695f,
+                                                   0.034546845f,   -0.04881143f,   0.009128804f,    -0.08802852f,
+                                                   0.03761666f,    0.008096139f,   -0.014454086f,   0.014361001f,
+                                                   -0.023502491f,  -0.0011840804f, -0.07607001f,    0.001856849f,
+                                                   -0.06509276f,   -0.006021153f,  -0.08570962f,    -0.1451793f,
+                                                   0.060212336f,   0.055259194f,   0.06974018f,     0.049454916f,
+                                                   -0.027794661f,  -0.08077226f,   -0.016179763f,   0.1169753f,
+                                                   0.17213494f,    -0.0056326236f, -0.053934924f,   -0.0124349f,
+                                                   -0.11520337f,   0.05409887f,    0.088759385f,    0.0019655675f,
+                                                   0.0042065294f,  0.03881498f,    0.019844765f,    0.041858196f,
+                                                   -0.05695512f,   0.047233116f,   0.038937137f,    -0.06542224f,
+                                                   0.014429736f,   -0.09719407f,   0.13908425f,     -0.05379757f,
+                                                   0.012321099f,   0.082840554f,   -0.029899208f,   0.044217527f,
+                                                   0.059855383f,   0.07711018f,    -0.045319796f,   0.0948846f,
+                                                   -0.011724666f,  -0.0033288454f, -0.033542685f,   -0.04764985f,
+                                                   -0.13873616f,   0.040668588f,   0.034832682f,    -0.015319203f,
+                                                   -0.018715994f,  0.046002675f,   0.0599172f,      -0.043107376f,
+                                                   0.0294216f,     -0.002314414f,  -0.022424703f,   0.0030315618f,
+                                                   0.0014641669f,  0.0029166266f,  -0.11878115f,    0.013738511f,
+                                                   0.12375372f,    -0.0006038222f, 0.029104086f,    0.087442465f,
+                                                   0.052958444f,   0.07558703f,    0.04817258f,     0.044462286f,
+                                                   -0.015213451f,  -0.08783778f,   -0.0561384f,     -0.003008196f,
+                                                   0.047060397f,   -0.002058388f,  0.03429439f,     -0.018839769f,
+                                                   0.024734668f,   0.024614193f,   -0.042046934f,   0.09597743f,
+                                                   -0.0043254104f, 0.04320769f,    0.0064070094f,   -0.0019131786f,
+                                                   -0.02558259f,   -0.022822596f,  -0.023273505f,   -0.02464396f,
+                                                   -0.10991725f,   -0.006240552f,  0.0074488563f,   0.024044557f,
+                                                   0.04383914f,    -0.046476185f,  0.028658995f,    0.060410924f,
+                                                   0.050786525f,   0.009452605f,   -0.0073054377f,  -0.024810238f,
+                                                   0.0052906186f,  0.0066939713f,  -0.0020913032f,  0.014515517f,
+                                                   0.015898481f,   0.021362653f,   -0.030262267f,   0.016587038f,
+                                                   -0.011442813f,  0.041154444f,   -0.007631438f,   -0.03423484f,
+                                                   -0.010977775f,  0.036152758f,   0.0066366293f,   0.11915515f,
+                                                   0.02318443f,    -0.041350313f,  0.021485701f,    -0.10906167f,
+                                                   -0.028218046f,  -0.00954771f,   0.020531068f,    -0.11995105f,
+                                                   -0.03672871f,   0.024019798f,   0.014255957f,    -0.05221243f,
+                                                   -0.00661567f,   -0.04630967f,   0.033188973f,    0.10107534f,
+                                                   -0.014027541f,  0.030796422f,   -0.10270911f,    -0.035999842f,
+                                                   0.15443139f,    0.07684145f,    0.036571592f,    -0.035900835f,
+                                                   -0.0034699554f, 0.06209149f,    0.015920248f,    -0.031122351f,
+                                                   -0.03858649f,   0.01849943f,    0.13872518f,     0.01503974f,
+                                                   0.069941424f,   -0.06948533f,   -0.0088794185f,  0.061282158f,
+                                                   -0.047401894f,  0.03100163f,    -0.041533746f,   -0.10430945f,
+                                                   0.044574402f,   -0.01425562f,   -0.024290353f,   0.034563623f,
+                                                   0.05866852f,    0.023947537f,   -0.09445152f,    0.035450947f,
+                                                   0.02247216f,    -0.0042998926f, 0.061146557f,    -0.10250651f,
+                                                   0.020881841f,   -0.06747029f,   0.10062043f,     -0.0023941975f,
+                                                   0.03532124f,    -0.016341697f,  0.09685456f,     -0.016764693f,
+                                                   0.051808182f,   0.05875331f,    -0.04536488f,    0.001626336f,
+                                                   -0.028892258f,  -0.01048663f,   -0.009793449f,   -0.017093895f,
+                                                   0.010987891f,   0.02357273f,    -0.00010856845f, 0.0099760275f,
+                                                   -0.001845119f,  -0.03551521f,   0.0018358806f,   0.05763657f,
+                                                   -0.01769146f,   0.040995963f,   0.02235177f,     -0.060430344f,
+                                                   0.11475477f,    -0.023854522f,  0.10071741f,     0.0686208f,
+                                                   -0.014250481f,  0.034261297f,   0.047418304f,    0.08562733f,
+                                                   -0.030519066f,  0.0060542435f,  0.014653856f,    -0.038836084f,
+                                                   0.04096551f,    0.032249358f,   -0.08355519f,    -0.026823482f,
+                                                   0.056386515f,   -0.010401743f,  -0.028396193f,   0.08507674f,
+                                                   0.014410365f,   0.020995233f,   0.17040324f,     0.11511526f,
+                                                   0.02459721f,    0.0066619175f,  0.025853224f,    -0.023133837f,
+                                                   -0.081302024f,  0.017264642f,   -0.009585969f,   0.09491168f,
+                                                   -0.051313367f,  0.054532815f,   -0.014298593f,   0.10657464f,
+                                                   0.007076659f,   0.10964551f,    0.0409152f,      0.008275321f,
+                                                   -0.07283536f,   0.07937492f,    0.04192024f,     -0.1075027f
+            });
+
+    auto recurrentToCellWeights =
+            MakeTensor<float, 2>(tensorInfo20x16, {-0.037322544f,   0.018592842f,   0.0056175636f,  -0.06253426f,
+                                                   0.055647098f,    -0.05713207f,   -0.05626563f,   0.005559383f,
+                                                   0.03375411f,     -0.025757805f,  -0.088049285f,  0.06017052f,
+                                                   -0.06570978f,    0.007384076f,   0.035123326f,   -0.07920549f,
+                                                   0.053676967f,    0.044480428f,   -0.07663568f,   0.0071805613f,
+                                                   0.08089997f,     0.05143358f,    0.038261272f,   0.03339287f,
+                                                   -0.027673481f,   0.044746667f,   0.028349208f,   0.020090483f,
+                                                   -0.019443132f,   -0.030755889f,  -0.0040000007f, 0.04465846f,
+                                                   -0.021585021f,   0.0031670958f,  0.0053199246f,  -0.056117613f,
+                                                   -0.10893326f,    0.076739706f,   -0.08509834f,   -0.027997585f,
+                                                   0.037871376f,    0.01449768f,    -0.09002357f,   -0.06111149f,
+                                                   -0.046195522f,   0.0422062f,     -0.005683705f,  -0.1253618f,
+                                                   -0.012925729f,   -0.04890792f,   0.06985068f,    0.037654128f,
+                                                   0.03398274f,     -0.004781977f,  0.007032333f,   -0.031787455f,
+                                                   0.010868644f,    -0.031489216f,  0.09525667f,    0.013939797f,
+                                                   0.0058680447f,   0.0167067f,     0.02668468f,    -0.04797466f,
+                                                   -0.048885044f,   -0.12722108f,   0.035304096f,   0.06554885f,
+                                                   0.00972396f,     -0.039238118f,  -0.05159735f,   -0.11329045f,
+                                                   0.1613692f,      -0.03750952f,   0.06529313f,    -0.071974665f,
+                                                   -0.11769596f,    0.015524369f,   -0.0013754242f, -0.12446318f,
+                                                   0.02786344f,     -0.014179351f,  0.005264273f,   0.14376344f,
+                                                   0.015983658f,    0.03406988f,    -0.06939408f,   0.040699873f,
+                                                   0.02111075f,     0.09669095f,    0.041345075f,   -0.08316494f,
+                                                   -0.07684199f,    -0.045768797f,  0.032298047f,   -0.041805092f,
+                                                   0.0119405f,      0.0061010392f,  0.12652606f,    0.0064572375f,
+                                                   -0.024950314f,   0.11574242f,    0.04508852f,    -0.04335324f,
+                                                   0.06760663f,     -0.027437469f,  0.07216407f,    0.06977076f,
+                                                   -0.05438599f,    0.034033038f,   -0.028602652f,  0.05346137f,
+                                                   0.043184172f,    -0.037189785f,  0.10420091f,    0.00882477f,
+                                                   -0.054019816f,   -0.074273005f,  -0.030617684f,  -0.0028467078f,
+                                                   0.024302477f,    -0.0038869337f, 0.005332455f,   0.0013399826f,
+                                                   0.04361412f,     -0.007001822f,  0.09631092f,    -0.06702025f,
+                                                   -0.042049985f,   -0.035070654f,  -0.04103342f,   -0.10273396f,
+                                                   0.0544271f,      0.037184782f,   -0.13150354f,   -0.0058036847f,
+                                                   -0.008264958f,   0.042035464f,   0.05891794f,    0.029673764f,
+                                                   0.0063542654f,   0.044788733f,   0.054816857f,   0.062257513f,
+                                                   -0.00093483756f, 0.048938446f,   -0.004952862f,  -0.007730018f,
+                                                   -0.04043371f,    -0.017094059f,  0.07229206f,    -0.023670016f,
+                                                   -0.052195564f,   -0.025616996f,  -0.01520939f,   0.045104615f,
+                                                   -0.007376126f,   0.003533447f,   0.006570588f,   0.056037236f,
+                                                   0.12436656f,     0.051817212f,   0.028532185f,   -0.08686856f,
+                                                   0.11868599f,     0.07663395f,    -0.07323171f,   0.03463402f,
+                                                   -0.050708205f,   -0.04458982f,   -0.11590894f,   0.021273347f,
+                                                   0.1251325f,      -0.15313013f,   -0.12224372f,   0.17228661f,
+                                                   0.023029093f,    0.086124025f,   0.006445803f,   -0.03496501f,
+                                                   0.028332196f,    0.04449512f,    -0.042436164f,  -0.026587414f,
+                                                   -0.006041347f,   -0.09292539f,   -0.05678812f,   0.03897832f,
+                                                   0.09465633f,     0.008115513f,   -0.02171956f,   0.08304309f,
+                                                   0.071401566f,    0.019622514f,   0.032163795f,   -0.004167056f,
+                                                   0.02295182f,     0.030739572f,   0.056506045f,   0.004612461f,
+                                                   0.06524936f,     0.059999723f,   0.046395954f,   -0.0045512207f,
+                                                   -0.1335546f,     -0.030136576f,  0.11584653f,    -0.014678886f,
+                                                   0.0020118146f,   -0.09688814f,   -0.0790206f,    0.039770417f,
+                                                   -0.0329582f,     0.07922767f,    0.029322514f,   0.026405897f,
+                                                   0.04207835f,     -0.07073373f,   0.063781224f,   0.0859677f,
+                                                   -0.10925287f,    -0.07011058f,   0.048005477f,   0.03438226f,
+                                                   -0.09606514f,    -0.006669445f,  -0.043381985f,  0.04240257f,
+                                                   -0.06955775f,    -0.06769346f,   0.043903265f,   -0.026784198f,
+                                                   -0.017840602f,   0.024307009f,   -0.040079936f,  -0.019946516f,
+                                                   0.045318738f,    -0.12233574f,   0.026170589f,   0.0074471775f,
+                                                   0.15978073f,     0.10185836f,    0.10298046f,    -0.015476589f,
+                                                   -0.039390966f,   -0.072174534f,  0.0739445f,     -0.1211869f,
+                                                   -0.0347889f,     -0.07943156f,   0.014809798f,   -0.12412325f,
+                                                   -0.0030663363f,  0.039695457f,   0.0647603f,     -0.08291318f,
+                                                   -0.018529687f,   -0.004423833f,  0.0037507233f,  0.084633216f,
+                                                   -0.01514876f,    -0.056505352f,  -0.012800942f,  -0.06994386f,
+                                                   0.012962922f,    -0.031234352f,  0.07029052f,    0.016418684f,
+                                                   0.03618972f,     0.055686004f,   -0.08663945f,   -0.017404709f,
+                                                   -0.054761406f,   0.029065743f,   0.052404847f,   0.020238016f,
+                                                   0.0048197987f,   -0.0214882f,    0.07078733f,    0.013016777f,
+                                                   0.06262858f,     0.009184685f,   0.020785125f,   -0.043904778f,
+                                                   -0.0270329f,     -0.03299152f,   -0.060088247f,  -0.015162964f,
+                                                   -0.001828936f,   0.12642565f,    -0.056757294f,  0.013586685f,
+                                                   0.09232601f,     -0.035886683f,  0.06000002f,    0.05229691f,
+                                                   -0.052580316f,   -0.082029596f,  -0.010794592f,  0.012947712f,
+                                                   -0.036429964f,   -0.085508935f,  -0.13127148f,   -0.017744139f,
+                                                   0.031502828f,    0.036232427f,   -0.031581745f,  0.023051167f,
+                                                   -0.05325106f,    -0.03421577f,   0.028793324f,   -0.034633752f,
+                                                   -0.009881397f,   -0.043551125f,  -0.018609839f,  0.0019097115f,
+                                                   -0.008799762f,   0.056595087f,   0.0022273948f,  0.055752404f
+            });
+
+    auto recurrentToOutputWeights =
+            MakeTensor<float, 2>(tensorInfo20x16, {0.025825322f, -0.05813119f, 0.09495884f,-0.045984812f, -0.01255415f,
+                                                    -0.0026479573f,-0.08196161f,-0.054914974f,-0.0046604523f,
+                                                   -0.029587349f, -0.044576716f,  -0.07480124f,  -0.082868785f,
+                                                   0.023254942f,    0.027502948f, -0.0039728214f, -0.08683098f,
+                                                   -0.08116779f,  -0.014675607f,   -0.037924774f, -0.023314456f,
+                                                   -0.007401714f, -0.09255757f,  0.029460307f,    -0.08829125f,
+                                                    -0.005139627f,  -0.08989442f,  -0.0555066f,   0.13596267f,
+                                                   -0.025062224f, -0.048351806f,  -0.03850004f,  0.07266485f,
+                                                   -0.022414139f,   0.05940088f, 0.075114764f,   0.09597592f,
+                                                   -0.010211725f, -0.0049794707f,  -0.011523867f, -0.025980417f,
+                                                   0.072999895f,  0.11091378f,   -0.081685916f,   0.014416728f,
+                                                    0.043229222f,   0.034178585f,  -0.07530371f,  0.035837382f,
+                                                   -0.085607f, -0.007721233f,  -0.03287832f,  -0.043848954f,
+                                                   -0.06404588f,    -0.06632928f, -0.073643476f,  0.008214239f,
+                                                   -0.045984086f, 0.039764922f,    0.03474462f, 0.060612556f,
+                                                   -0.080590084f, 0.049127717f,  0.04151091f,     -0.030063879f,
+                                                    0.008801774f,   -0.023021035f, -0.019558564f, 0.05158114f,
+                                                   -0.010947698f, -0.011825728f,  0.0075720972f, 0.0699727f,
+                                                   -0.0039981045f,  0.069350146f, 0.08799282f,    0.016156472f,
+                                                   0.035502106f,  0.11695009f,     0.006217345f, 0.13392477f,
+                                                   -0.037875112f, 0.025745004f,  0.08940699f,     -0.00924166f,
+                                                    0.0046702605f,  -0.036598757f, -0.08811812f,  0.10522024f,
+                                                   -0.032441203f, 0.008176899f,   -0.04454919f,  0.07058152f,
+                                                   0.0067963637f,   0.039206743f, 0.03259838f,    0.03725492f,
+                                                   -0.09515802f,  0.013326398f,    -0.052055415f, -0.025676316f,
+                                                   0.03198509f,   -0.015951829f, -0.058556724f,   0.036879618f,
+                                                    0.043357447f,   0.028362012f,  -0.05908629f,  0.0059240665f,
+                                                   -0.04995891f, -0.019187413f,0.0276265f, -0.01628143f, 0.0025863599f,
+                                                   0.08800015f, 0.035250366f,   -0.022165963f, -0.07328642f,
+                                                   -0.009415526f,   -0.07455109f, 0.11690406f,    0.0363299f,
+                                                   0.07411125f,   0.042103454f,    -0.009660886f, 0.019076364f,
+                                                   0.018299393f, -0.046004917f, 0.08891175f,0.0431396f, -0.026327137f,
+                                                   -0.051502608f, 0.08979574f,   -0.051670972f,   0.04940282f,
+                                                    -0.07491107f,   -0.021240504f, 0.022596184f,  -0.034280192f,
+                                                   0.060163025f, -0.058211457f,  -0.051837247f, -0.01349775f,
+                                                   -0.04639988f,    -0.035936575f, -0.011681591f,  0.064818054f,
+                                                   0.0073146066f, -0.021745546f,   -0.043124277f, -0.06471268f,
+                                                   -0.07053354f,  -0.029321948f, -0.05330136f,    0.016933719f,
+                                                    -0.053782392f,  0.13747959f,   -0.1361751f,   -0.11569455f,
+                                                   0.0033329215f, 0.05693899f,    -0.053219706f, 0.063698f,
+                                                   0.07977434f,     -0.07924483f, 0.06936997f,    0.0034815092f,
+                                                   -0.007305279f, -0.037325785f,   -0.07251102f, -0.033633437f,
+                                                   -0.08677009f,  0.091591336f,  -0.14165086f,    0.021752775f,
+                                                    0.019683983f,   0.0011612234f, -0.058154266f, 0.049996935f,
+                                                   0.0288841f, -0.0024567875f, -0.14345716f, 0.010955264f,-0.10234828f,
+                                                   0.1183656f, -0.0010731248f, -0.023590032f,-0.072285876f,-0.0724771f,
+                                                   -0.026382286f, -0.0014920527f, 0.042667855f,  0.0018776858f,
+                                                   0.02986552f,     0.009814309f, 0.0733756f,     0.12289186f,
+                                                   0.018043943f,  -0.0458958f,     0.049412545f, 0.033632483f,
+                                                   0.05495232f,   0.036686596f,  -0.013781798f,   -0.010036754f,
+                                                    0.02576849f,    -0.08307328f,  0.010112348f,  0.042521734f,
+                                                   -0.05869831f, -0.071689695f, 0.03876447f, -0.13275425f, -0.0352966f,
+                                                   -0.023077697f, 0.10285965f,    0.084736146f,  0.15568255f,
+                                                   -0.00040734606f, 0.027835453f, -0.10292561f,   -0.032401145f,
+                                                   0.10053256f,   -0.026142767f,   -0.08271222f, -0.0030240538f,
+                                                   -0.016368777f, 0.1070414f,    0.042672627f,    0.013456989f,
+                                                    -0.0437609f,    -0.022309763f, 0.11576483f,   0.04108048f,
+                                                   0.061026827f, -0.0190714f,  -0.0869359f, 0.037901703f,  0.0610107f,
+                                                   0.07202949f, 0.01675338f,    0.086139716f,  -0.08795751f,
+                                                   -0.014898893f,   -0.023771819f, -0.01965048f,   0.007955471f,
+                                                   -0.043740474f, 0.03346837f,     -0.10549954f, 0.090567775f,
+                                                   0.042013682f,  -0.03176985f,  0.12569028f,     -0.02421228f,
+                                                    -0.029526481f,  0.023851605f,  0.031539805f,  0.05292009f,
+                                                   -0.02344001f, -0.07811758f,   -0.08834428f,  0.10094801f,
+                                                   0.16594367f,     -0.06861939f, -0.021256343f,  -0.041093912f,
+                                                   -0.06669611f,  0.035498552f,    0.021757556f, -0.09302526f,
+                                                   -0.015403468f, -0.06614931f,  -0.051798206f,   -0.013874718f,
+                                                    0.03630673f,    0.010412845f,  -0.08077351f,  0.046185967f,
+                                                   0.0035662893f, 0.03541868f,    -0.094149634f, -0.034814864f,
+                                                   0.003128424f,    -0.020674974f, -0.03944324f,   -0.008110165f,
+                                                   -0.11113267f,  0.08484226f,     0.043586485f, 0.040582247f,
+                                                   0.0968012f,    -0.065249965f, -0.028036479f,   0.0050708856f,
+                                                    0.0017462453f,  0.0326779f,    0.041296225f,  0.09164146f,
+                                                   -0.047743853f, -0.015952192f,  -0.034451712f, 0.084197424f,
+                                                   -0.05347844f,    -0.11768019f, 0.085926116f,   -0.08251791f,
+                                                   -0.045081906f, 0.0948852f,      0.068401024f, 0.024856757f,
+                                                   0.06978981f,   -0.057309967f, -0.012775832f,   -0.0032452994f,
+                                                    0.01977615f, -0.041040014f, -0.024264973f,0.063464895f, 0.05431621f
+            });
+
+    auto cellToInputWeights =
+            MakeTensor<float, 1>(tensorInfo20, {0.040369894f, 0.030746894f,  0.24704495f,  0.018586371f, -0.037586458f,
+                                                -0.15312155f, -0.11812848f,  -0.11465643f, 0.20259799f,   0.11418174f,
+                                                -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f,-0.052169047f,
+                                                0.21198851f,  -0.38871562f,  -0.09061183f, -0.09683246f,  -0.21929175f
+            });
+
+    auto cellToForgetWeights =
+            MakeTensor<float, 1>(tensorInfo20, {-0.01998659f,-0.15568835f,-0.24248174f,   -0.012770197f, 0.041331276f,
+                                                -0.072311886f, -0.052123554f,-0.0066330447f,-0.043891653f,0.036225766f,
+                                                -0.047248036f, 0.021479502f,0.033189066f, 0.11952997f,   -0.020432774f,
+                                                0.64658105f,   -0.06650122f,  -0.03467612f,  0.095340036f, 0.23647355f
+            });
+
+    auto cellToOutputWeights =
+            MakeTensor<float, 1>(tensorInfo20, {0.08286371f,  -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
+                                                -0.5495371f,  -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
+                                                -0.11940523f, 0.007358328f, 0.1890978f,   0.4833202f,   -0.34441817f,
+                                                0.36312827f,  -0.26375428f, 0.1457655f,   -0.19724406f, 0.15548733f
+            });
+
+    auto projectionWeights =
+            MakeTensor<float, 2>(tensorInfo16x20,
+                                 {-0.009802181f,  0.09401916f,    0.0717386f,     -0.13895074f,  0.09641832f,
+                                  0.060420845f,   0.08539281f,    0.054285463f,   0.061395317f,  0.034448683f,
+                                  -0.042991187f,  0.019801661f,   -0.16840284f,   -0.015726732f, -0.23041931f,
+                                  -0.024478018f,  -0.10959692f,   -0.013875541f,  0.18600968f,   -0.061274476f,
+                                  0.0138165f,     -0.08160894f,   -0.07661644f,   0.032372914f,  0.16169067f,
+                                  0.22465782f,    -0.03993472f,   -0.004017731f,  0.08633481f,   -0.28869787f,
+                                  0.08682067f,    0.17240396f,    0.014975425f,   0.056431185f,  0.031037588f,
+                                  0.16702051f,    0.0077946745f,  0.15140012f,    0.29405436f,   0.120285f,
+                                  -0.188994f,     -0.027265169f,  0.043389652f,   -0.022061434f, 0.014777949f,
+                                  -0.20203483f,   0.094781205f,   0.19100232f,    0.13987629f,   -0.036132768f,
+                                  -0.06426278f,   -0.05108664f,   0.13221376f,    0.009441198f,  -0.16715929f,
+                                  0.15859416f,    -0.040437475f,  0.050779544f,   -0.022187516f, 0.012166504f,
+                                  0.027685808f,   -0.07675938f,   -0.0055694645f, -0.09444123f,  0.0046453946f,
+                                  0.050794356f,   0.10770313f,    -0.20790008f,   -0.07149004f,  -0.11425117f,
+                                  0.008225835f,   -0.035802525f,  0.14374903f,    0.15262283f,   0.048710253f,
+                                  0.1847461f,     -0.007487823f,  0.11000021f,    -0.09542012f,  0.22619456f,
+                                  -0.029149994f,  0.08527916f,    0.009043713f,   0.0042746216f, 0.016261552f,
+                                  0.022461696f,   0.12689082f,    -0.043589946f,  -0.12035478f,  -0.08361797f,
+                                  -0.050666027f,  -0.1248618f,    -0.1275799f,    -0.071875185f, 0.07377272f,
+                                  0.09944291f,    -0.18897448f,   -0.1593054f,    -0.06526116f,  -0.040107165f,
+                                  -0.004618631f,  -0.067624845f,  -0.007576253f,  0.10727444f,   0.041546922f,
+                                  -0.20424393f,   0.06907816f,    0.050412357f,   0.00724631f,   0.039827548f,
+                                  0.12449835f,    0.10747581f,    0.13708383f,    0.09134148f,   -0.12617786f,
+                                  -0.06428341f,   0.09956831f,    0.1208086f,     -0.14676677f,  -0.0727722f,
+                                  0.1126304f,     0.010139365f,   0.015571211f,   -0.038128063f, 0.022913318f,
+                                  -0.042050496f,  0.16842307f,    -0.060597885f,  0.10531834f,   -0.06411776f,
+                                  -0.07451711f,   -0.03410368f,   -0.13393489f,   0.06534304f,   0.003620307f,
+                                  0.04490757f,    0.05970546f,    0.05197996f,    0.02839995f,   0.10434969f,
+                                  -0.013699693f,  -0.028353551f,  -0.07260381f,   0.047201227f,  -0.024575593f,
+                                  -0.036445823f,  0.07155557f,    0.009672501f,   -0.02328883f,  0.009533515f,
+                                  -0.03606021f,   -0.07421458f,   -0.028082801f,  -0.2678904f,   -0.13221288f,
+                                  0.18419984f,    -0.13012612f,   -0.014588381f,  -0.035059117f, -0.04824723f,
+                                  0.07830115f,    -0.056184657f,  0.03277091f,    0.025466874f,  0.14494097f,
+                                  -0.12522776f,   -0.098633975f,  -0.10766018f,   -0.08317623f,  0.08594209f,
+                                  0.07749552f,    0.039474737f,   0.1776665f,     -0.07409566f,  -0.0477268f,
+                                  0.29323658f,    0.10801441f,    0.1154011f,     0.013952499f,  0.10739139f,
+                                  0.10708251f,    -0.051456142f,  0.0074137426f,  -0.10430189f,  0.10034707f,
+                                  0.045594677f,   0.0635285f,     -0.0715442f,    -0.089667566f, -0.10811871f,
+                                  0.00026344223f, 0.08298446f,    -0.009525053f,  0.006585689f,  -0.24567553f,
+                                  -0.09450807f,   0.09648481f,    0.026996298f,   -0.06419476f,  -0.04752702f,
+                                  -0.11063944f,   -0.23441927f,   -0.17608605f,   -0.052156363f, 0.067035615f,
+                                  0.19271925f,    -0.0032889997f, -0.043264326f,  0.09663576f,   -0.057112187f,
+                                  -0.10100678f,   0.0628376f,     0.04447668f,    0.017961001f,  -0.10094388f,
+                                  -0.10190601f,   0.18335468f,    0.10494553f,    -0.052095775f, -0.0026118709f,
+                                  0.10539724f,    -0.04383912f,   -0.042349473f,  0.08438151f,   -0.1947263f,
+                                  0.02251204f,    0.11216432f,    -0.10307853f,   0.17351969f,   -0.039091777f,
+                                  0.08066188f,    -0.00561982f,   0.12633002f,    0.11335965f,   -0.0088127935f,
+                                  -0.019777594f,  0.06864014f,    -0.059751723f,  0.016233567f,  -0.06894641f,
+                                  -0.28651384f,   -0.004228674f,  0.019708522f,   -0.16305895f,  -0.07468996f,
+                                  -0.0855457f,    0.099339016f,   -0.07580735f,   -0.13775392f,  0.08434318f,
+                                  0.08330512f,    -0.12131499f,   0.031935584f,   0.09180414f,   -0.08876437f,
+                                  -0.08049874f,   0.008753825f,   0.03498998f,    0.030215185f,  0.03907079f,
+                                  0.089751154f,   0.029194152f,   -0.03337423f,   -0.019092513f, 0.04331237f,
+                                  0.04299654f,    -0.036394123f,  -0.12915532f,   0.09793732f,   0.07512415f,
+                                  -0.11319543f,   -0.032502122f,  0.15661901f,    0.07671967f,   -0.005491124f,
+                                  -0.19379048f,   -0.218606f,     0.21448623f,    0.017840758f,  0.1416943f,
+                                  -0.07051762f,   0.19488361f,    0.02664691f,    -0.18104725f,  -0.09334311f,
+                                  0.15026465f,    -0.15493552f,   -0.057762887f,  -0.11604192f,  -0.262013f,
+                                  -0.01391798f,   0.012185008f,   0.11156489f,    -0.07483202f,  0.06693364f,
+                                  -0.26151478f,   0.046425626f,   0.036540434f,   -0.16435726f,  0.17338543f,
+                                  -0.21401681f,   -0.11385144f,   -0.08283257f,   -0.069031075f, 0.030635102f,
+                                  0.010969227f,   0.11109743f,    0.010919218f,   0.027526086f,  0.13519906f,
+                                  0.01891392f,    -0.046839405f,  -0.040167913f,  0.017953383f,  -0.09700955f,
+                                  0.0061885654f,  -0.07000971f,   0.026893595f,   -0.038844477f, 0.14543656f
+                                 });
+
+    std::vector<float> projectionBiasVector(outputSize, 0.f);
+    auto projectionBias = MakeTensor<float,1>(tensorInfo16, projectionBiasVector);
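+    // A zero projection bias is used, so the projection stage of this test only exercises the projection weights.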
+
+    armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo20x5);
+    armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo20x5);
+    armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo20x5);
+    armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo20x5);
+    armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo20x16);
+    armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo20x16);
+    armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo20x16);
+    armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo20x16);
+    armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo20);
+    armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo20);
+    armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo20);
+    armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo20);
+    armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo20);
+    armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo20);
+    armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo20);
+    armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo16x20);
+    armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo16);
+
+    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
+    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
+    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
+    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
+    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
+    AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
+    AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
+    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);
+
+    data.m_InputToInputWeights = &inputToInputWeightsTensor;
+    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
+    data.m_InputToCellWeights = &inputToCellWeightsTensor;
+    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
+    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
+    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
+    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
+    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
+    data.m_CellToInputWeights = &cellToInputWeightsTensor;
+    data.m_InputGateBias = &inputGateBiasTensor;
+    data.m_ForgetGateBias = &forgetGateBiasTensor;
+    data.m_CellBias = &cellBiasTensor;
+    data.m_OutputGateBias = &outputGateBiasTensor;
+    data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
+    data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
+    data.m_ProjectionWeights = &projectionWeightsTensor;
+    data.m_ProjectionBias = &projectionBiasTensor;
+
+    // Flags to set test configuration
+    data.m_Parameters.m_ActivationFunc = 4;
+    data.m_Parameters.m_CifgEnabled = false;
+    data.m_Parameters.m_PeepholeEnabled = true;
+    data.m_Parameters.m_ProjectionEnabled = true;
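+    // With CIFG disabled and both peephole and projection enabled this exercises the "full" LSTM variant;
+    // m_ActivationFunc = 4 selects tanh (the Android NN style activation-function encoding).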
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
+    inputHandle->Allocate();
+    outputStateInHandle->Allocate();
+    cellStateInHandle->Allocate();
+
+    scratchHandle->Allocate();
+    outputStateOutHandle->Allocate();
+    cellStateOutHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
+    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+
+    return ret;
+
+}
+
+
+LayerTestResult<float, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                                   const boost::multi_array<float, 2>& input,
+                                                                   const boost::multi_array<float, 2>& outputExpected)
+{
+    bool cifgEnabled = true;
+    bool peepholeEnabled = true;
+    bool projectionEnabled = false;
+    // 'input' and 'outputExpected' hold the test data rather than the LSTM workload tensors;
+    // they are only used here to derive the tensor dimensions.
+    unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
+    unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
+
+    unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
+
+    const unsigned int cellSize = outputSize;
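+    // Without a projection layer the LSTM output has one element per cell, so the number of cells can be
+    // taken directly from the expected output width.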
+
+    // Decide the shape of all input tensors
+    armnn::TensorInfo inputTensorInfo({batchSize, inputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo cellStateInTensorInfo({batchSize, cellSize}, armnn::GetDataType<float>());
+
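+    // The scratch buffer is an intermediate working tensor of the LSTM workload; its width is a multiple of
+    // the number of cells.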
+    unsigned int scratchBufferSize = cifgEnabled ? cellSize * 4 : cellSize * 3;
+    armnn::TensorInfo scratchBufferTensorInfo({batchSize, scratchBufferSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo cellStateOutTensorInfo({batchSize, cellSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, armnn::GetDataType<float>());
+
+    // List of inputs
+    std::vector<float> inputData;
+    inputData.assign(input.data(), input.data() + batchSize*inputSize);
+    auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputData);
+
+    std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
+    auto outputStateInTensor = MakeTensor<float, 2>(outputStateInTensorInfo, outputStateInVector);
+
+    std::vector<float> cellStateInVector(batchSize * cellSize, 0.f);
+    auto cellStateInTensor = MakeTensor<float, 2>(cellStateInTensorInfo, cellStateInVector);
+
+    // Prepare all the weights in the descriptor for LSTM
+    armnn::LstmQueueDescriptor data;
+    armnn::TensorInfo tensorInfoInput({cellSize, inputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, armnn::GetDataType<float>());
+    armnn::TensorInfo tensorInfoNumUnits({cellSize}, armnn::GetDataType<float>());
+
+    auto inputToCellWeights = MakeTensor<float, 2>(tensorInfoInput,
+                                                     {-0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f,
+                                                     0.04717243f, 0.48944736f, -0.38535351f,
+                                                     -0.17212132f});
+    auto inputToForgetWeights = MakeTensor<float, 2>(tensorInfoInput,
+                                                     {-0.55291498f, -0.42866567f, 0.13056988f,
+                                                       -0.3633365f, -0.22755712f, 0.28253698f, 0.24407166f,
+                                                       0.33826375f});
+    auto inputToOutputWeights = MakeTensor<float, 2>(tensorInfoInput,
+                                                     {0.10725588f, -0.02335852f, -0.55932593f,
+                                                       -0.09426838f, -0.44257352f, 0.54939759f,
+                                                       0.01533556f, 0.42751634f});
+    auto cellBias = MakeTensor<float, 1>(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f});
+    auto forgetGateBias = MakeTensor<float, 1>(tensorInfoNumUnits, {1.f, 1.f, 1.f, 1.f});
+    auto outputGateBias = MakeTensor<float, 1>(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f});
+
+    auto recurrentToCellWeights = MakeTensor<float, 2>(tensorInfoOutput,
+                {0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f, 0.42957711f,
+                 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f, 0.20675004f,
+                 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f, 0.44901288f,
+                 0.21193194f});
+    auto recurrentToForgetWeights = MakeTensor<float, 2>(tensorInfoOutput,
+                 {-0.13832897f, -0.0515101f, -0.2359007f, -0.16661474f, -0.14340827f,
+                  0.36986142f, 0.23414481f, 0.55899f, 0.10798943f, -0.41174671f, 0.17751795f,
+                  -0.34484994f, -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f});
+
+    auto recurrentToOutputWeights = MakeTensor<float, 2>(tensorInfoOutput,
+                {0.41613156f, 0.42610586f, -0.16495961f, -0.5663873f, 0.30579174f, -0.05115908f,
+                 -0.33941799f, 0.23364776f, 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
+                 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f});
+
+    auto cellToForgetWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
+                {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f});
+    auto cellToOutputWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
+                {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f});
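+    // The peephole (cell-to-gate) weights are per-cell scalars, hence the 1-D tensors of numUnits elements.
+    // With CIFG enabled the input gate is coupled to the forget gate, so no input-gate or cell-to-input
+    // weights are supplied in this test.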
+
+    armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfoInput);
+    armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfoInput);
+    armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfoInput);
+
+    armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfoNumUnits);
+    armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfoNumUnits);
+    armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfoNumUnits);
+
+    armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfoOutput);
+    armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput);
+    armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput);
+
+    armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits);
+    armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits);
+
+    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
+
+    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
+    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
+    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
+
+    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
+    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
+
+    AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
+    AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
+
+    data.m_InputToCellWeights = &inputToCellWeightsTensor;
+    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
+    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
+
+    data.m_CellBias = &cellBiasTensor;
+    data.m_ForgetGateBias = &forgetGateBiasTensor;
+    data.m_OutputGateBias = &outputGateBiasTensor;
+
+    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
+    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
+    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
+
+    data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
+    data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
+
+    // other parameters for the descriptor
+    data.m_Parameters.m_CifgEnabled = cifgEnabled;
+    data.m_Parameters.m_ProjectionEnabled = projectionEnabled;
+    data.m_Parameters.m_PeepholeEnabled = peepholeEnabled;
+
+    data.m_Parameters.m_ActivationFunc = 4;
+    data.m_Parameters.m_ClippingThresProj = 0.0;
+    data.m_Parameters.m_ClippingThresCell = 0.0;
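+    // A threshold of 0.0 leaves the cell state and the projection output unclipped.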
+
+    // List of outputs
+    std::vector<float> scratchBufferVector(batchSize * scratchBufferSize, 0.f);
+    auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
+    LayerTestResult<float, 2> ret0(scratchBufferTensorInfo);
+
+    // Output state for a certain time step
+    std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
+    auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
+    LayerTestResult<float, 2> ret1(outputStateOutTensorInfo);
+
+    // Cell state for a certain time step
+    std::vector<float> cellStateOutVector(batchSize * cellSize, 0.f);
+    auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
+    LayerTestResult<float, 2> ret2(cellStateOutTensorInfo);
+
+    // Output for a certain time step
+    std::vector<float> outputVector(batchSize * outputSize, 0.f);
+    auto outputTensor = MakeTensor<float, 2>(outputTensorInfo, outputVector);
+    std::vector<float> outputData;
+    outputData.assign(outputExpected.data(), outputExpected.data() + batchSize*outputSize);
+    LayerTestResult<float, 2> ret3(outputTensorInfo);
+    ret3.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputData);
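+    // Only ret3 (the layer output) carries expected data and is returned for comparison; ret0-ret2 simply
+    // receive the scratch buffer and state outputs of the workload.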
+
+    // Prepare the inputs and outputs for the workload
+    std::unique_ptr<armnn::ITensorHandle> inputHandle =
+            workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
+            workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
+            workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> scratchBufferHandle =
+            workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
+            workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
+            workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle =
+            workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
+    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
+
+    AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchBufferHandle.get());
+    AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
+    AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
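+    // The LSTM workload therefore has three inputs (input, output state, cell state) and four outputs
+    // (scratch buffer, output state, cell state, output), added in that order.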
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
+
+    inputHandle->Allocate();
+    outputStateInHandle->Allocate();
+    cellStateInHandle->Allocate();
+
+    scratchBufferHandle->Allocate();
+    outputStateOutHandle->Allocate();
+    cellStateOutHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
+    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
+    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
+
+    CopyDataToITensorHandle(scratchBufferHandle.get(), &scratchBufferTensor[0][0]);
+    CopyDataToITensorHandle(outputStateOutHandle.get(), &outputStateOutTensor[0][0]);
+    CopyDataToITensorHandle(cellStateOutHandle.get(), &cellStateOutTensor[0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret0.output[0][0], scratchBufferHandle.get());
+    CopyDataFromITensorHandle(&ret1.output[0][0], outputStateOutHandle.get());
+    CopyDataFromITensorHandle(&ret2.output[0][0], cellStateOutHandle.get());
+    CopyDataFromITensorHandle(&ret3.output[0][0], outputHandle.get());
+
+    return ret3;
+}
diff --git a/src/backends/backendsCommon/test/NormTestImpl.hpp b/src/backends/backendsCommon/test/NormTestImpl.hpp
new file mode 100644
index 0000000..0d8d434
--- /dev/null
+++ b/src/backends/backendsCommon/test/NormTestImpl.hpp
@@ -0,0 +1,343 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/Exceptions.hpp>
+#include <armnn/LayerSupport.hpp>
+#include "armnn/Types.hpp"
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+LayerTestResult<float,4> SimpleNormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                     armnn::NormalizationAlgorithmChannel normChannel,
+                                                     armnn::NormalizationAlgorithmMethod normMethod)
+{
+    const unsigned int inputHeight = 2;
+    const unsigned int inputWidth = 2;
+    const unsigned int inputChannels = 1;
+    const unsigned int inputNum = 2;
+
+    unsigned int outputHeight = inputHeight;
+    unsigned int outputWidth = inputWidth;
+    unsigned int outputChannels = inputChannels;
+    unsigned int outputNum = inputNum;
+
+    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
+    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
+
+    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    LayerTestResult<float,4> ret(outputTensorInfo);
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
+        // Batch #0
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        // Batch #1
+        5.0f, 6.0f,
+        7.0f, 8.0f
+    }));
+
+    float alpha = 1.f;
+    float beta = 1.f;
+    float kappa = 1.f;
+    uint32_t normSize = 3;
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::NormalizationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Parameters.m_NormChannelType = normChannel;
+    data.m_Parameters.m_NormMethodType = normMethod;
+    data.m_Parameters.m_NormSize = normSize;
+    data.m_Parameters.m_Alpha = alpha;
+    data.m_Parameters.m_Beta = beta;
+    data.m_Parameters.m_K = kappa;
+    data.m_Parameters.m_DataLayout = armnn::DataLayout::NCHW;
+
+    armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+    armnn::NormalizationQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    switch (normMethod)
+    {
+        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
+        {
+            switch (normChannel)
+            {
+                case armnn::NormalizationAlgorithmChannel::Within:
+                {
+                    // When normalising within channels, the 3x3 kernel covers the entire 2x2 input at every index.
+                    // Therefore, all output values should equal the inputs, but divided by:
+                    // pow((kappa + (accumulatedScale * alpha)), beta)
+                    // ...where accumulatedScale is the sum of every element squared.
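+                    // For example, batch #0 is { 1, 2, 3, 4 } and alpha = beta = kappa = 1, so
+                    // accumulatedScale = 1 + 4 + 9 + 16 = 30 and divisor = (1 + 30 * 1)^1 = 31,
+                    // giving expected outputs of 1/31, 2/31, 3/31 and 4/31.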
+                    float divisor[inputNum];
+                    for(int i = 0; i < boost::numeric_cast<int>(inputNum); i++)
+                    {
+                        float accumulatedScale = input[i][0][0][0]*input[i][0][0][0] +
+                                                 input[i][0][0][1]*input[i][0][0][1] +
+                                                 input[i][0][1][0]*input[i][0][1][0] +
+                                                 input[i][0][1][1]*input[i][0][1][1];
+                        divisor[i] = powf((kappa + accumulatedScale * alpha), beta);
+                    }
+                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
+                                                              std::vector<float>({input[0][0][0][0]/divisor[0],
+                                                                                  input[0][0][0][1]/divisor[0],
+                                                                                  input[0][0][1][0]/divisor[0],
+                                                                                  input[0][0][1][1]/divisor[0],
+                                                                                  input[1][0][0][0]/divisor[1],
+                                                                                  input[1][0][0][1]/divisor[1],
+                                                                                  input[1][0][1][0]/divisor[1],
+                                                                                  input[1][0][1][1]/divisor[1]}));
+                    break;
+                }
+                case armnn::NormalizationAlgorithmChannel::Across:
+                {
+                    // When normalising across channels, all output values should equal the inputs, but multiplied by:
+                    // pow((kappa + (accumulatedScale * alpha)), -beta)
+                    // ...where accumulatedScale is the sum of the squared inputs of the adjacent channels for this
+                    // element, and "adjacent" means within half the normSize of the channel.
+                    // The test data has only one channel, so this is simplified below.
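+                    // For example, the input value 1.0f gives scale = pow(1 + 1 * 1, -1) = 0.5, so its expected
+                    // output is 0.5f.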
+                    std::vector<float> outputVector;
+                    for (int n = 0; n < boost::numeric_cast<int>(inputNum); ++n)
+                    {
+                        for (int h = 0; h < boost::numeric_cast<int>(inputHeight); ++h)
+                        {
+                            for (int w = 0; w < boost::numeric_cast<int>(inputWidth); ++w)
+                            {
+                                float accumulatedScale = input[n][0][h][w]*input[n][0][h][w];
+                                float scale = powf((kappa + accumulatedScale * alpha), -beta);
+                                outputVector.push_back(input[n][0][h][w] * scale);
+                            }
+                        }
+                    }
+                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputVector);
+                    break;
+                }
+                default:
+                {
+                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
+                                                        "only Across and Within are supported");
+                }
+            }
+            break;
+        }
+        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
+        default:
+        {
+            throw armnn::UnimplementedException("Unsupported normalisation method type, "
+                                                "only LocalBrightness is supported");
+        }
+    }
+
+    return ret;
+}
+
+LayerTestResult<float,4> SimpleNormalizationNhwcTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                         armnn::NormalizationAlgorithmChannel normChannel,
+                                                         armnn::NormalizationAlgorithmMethod normMethod)
+{
+    const unsigned int inputHeight = 2;
+    const unsigned int inputWidth = 2;
+    const unsigned int inputChannels = 1;
+    const unsigned int inputNum = 2;
+
+    unsigned int outputHeight = inputHeight;
+    unsigned int outputWidth = inputWidth;
+    unsigned int outputChannels = inputChannels;
+    unsigned int outputNum = inputNum;
+
+    unsigned int inputShape[] = { inputNum, inputHeight, inputWidth, inputChannels };
+    unsigned int outputShape[] = { outputNum, outputHeight, outputWidth, outputChannels };
+
+    auto inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    auto outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    LayerTestResult<float,4> ret(outputTensorInfo);
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
+        // Batch #0
+        1.0f, 2.0f,
+        3.0f, 4.0f,
+        // Batch #1
+        5.0f, 6.0f,
+        7.0f, 8.0f
+    }));
+
+    float alpha = 1.f;
+    float beta = 1.f;
+    float kappa = 1.f;
+    uint32_t normSize = 3;
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::NormalizationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Parameters.m_NormChannelType = normChannel;
+    data.m_Parameters.m_NormMethodType = normMethod;
+    data.m_Parameters.m_NormSize = normSize;
+    data.m_Parameters.m_Alpha = alpha;
+    data.m_Parameters.m_Beta = beta;
+    data.m_Parameters.m_K = kappa;
+    data.m_Parameters.m_DataLayout = armnn::DataLayout::NHWC;
+
+    armnn::PassthroughCpuTensorHandle refHandle(outputTensorInfo, &ret.outputExpected[0][0][0][0]);
+    armnn::NormalizationQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, &refHandle);
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    switch (normMethod)
+    {
+        case armnn::NormalizationAlgorithmMethod::LocalBrightness:
+        {
+            switch (normChannel)
+            {
+                case armnn::NormalizationAlgorithmChannel::Across:
+                {
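+                    // With a single channel and alpha = beta = kappa = 1, each expected value is x / (1 + x*x),
+                    // e.g. 1/2 = 0.5, 2/5 = 0.4, 3/10 = 0.3, ..., 8/65 = 0.12307...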
+                    std::vector<float> expectedOutput{ 0.5f, 0.400000006f, 0.300000012f, 0.235294119f,
+                                                       0.192307696f, 0.16216217f, 0.140000001f, 0.123076923f };
+                    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo, expectedOutput);
+                    break;
+                }
+                default:
+                {
+                    throw armnn::UnimplementedException("Unsupported normalisation channel type, "
+                                                        "only Across (cross-map) is supported for the NHWC layout");
+                }
+            }
+            break;
+        }
+        case armnn::NormalizationAlgorithmMethod::LocalContrast: // NOTE: intentional fallthrough.
+        default:
+        {
+            throw armnn::UnimplementedException("Unsupported normalisation method type, "
+                                                "only LocalBrightness is supported");
+        }
+    }
+
+    return ret;
+}
+
+LayerTestResult<float,4> CompareNormalizationTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                      armnn::IWorkloadFactory& refWorkloadFactory,
+                                                      armnn::NormalizationAlgorithmChannel normChannel,
+                                                      armnn::NormalizationAlgorithmMethod normMethod)
+{
+    constexpr unsigned int inputNum = 5;
+    constexpr unsigned int inputChannels = 3;
+    constexpr unsigned int inputHeight = 32;
+    constexpr unsigned int inputWidth = 24;
+
+    constexpr unsigned int outputNum = inputNum;
+    constexpr unsigned int outputChannels = inputChannels;
+    constexpr unsigned int outputHeight = inputHeight;
+    constexpr unsigned int outputWidth = inputWidth;
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = {inputNum, inputChannels, inputHeight, inputWidth};
+    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    LayerTestResult<float,4> ret(outputTensorInfo);
+
+    auto input = MakeRandomTensor<float, 4>(inputTensorInfo, 111234);
+
+    constexpr float alpha = 1.f;
+    constexpr float beta = 1.f;
+    constexpr float kappa = 1.f;
+    constexpr uint32_t normSize = 5;
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::NormalizationQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Parameters.m_NormChannelType = normChannel;
+    data.m_Parameters.m_NormMethodType  = normMethod;
+    data.m_Parameters.m_NormSize        = normSize;
+    data.m_Parameters.m_Alpha           = alpha;
+    data.m_Parameters.m_Beta            = beta;
+    data.m_Parameters.m_K               = kappa;
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
+
+    armnn::NormalizationQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+
+    // Don't execute if Normalization is not supported for the method and channel types,
+    // as an exception will be raised.
+    armnn::BackendId backend = workloadFactory.GetBackendId();
+    const size_t reasonIfUnsupportedMaxLen = 255;
+    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
+    ret.supported = armnn::IsNormalizationSupported(backend, inputTensorInfo, outputTensorInfo, data.m_Parameters,
+                                                    reasonIfUnsupported, reasonIfUnsupportedMaxLen);
+    if (!ret.supported)
+    {
+        return ret;
+    }
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateNormalization(data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateNormalization(refData, refInfo);
+
+    outputHandleRef->Allocate();
+    inputHandleRef->Allocate();
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+    refWorkloadFactory.Finalize();
+    workloadRef->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&ret.outputExpected[0][0][0][0], outputHandleRef.get());
+
+    return ret;
+}
+
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
new file mode 100644
index 0000000..8d88241
--- /dev/null
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -0,0 +1,330 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include <armnn/ArmNN.hpp>
+
+#include <Graph.hpp>
+#include <Network.hpp>
+
+#include <reference/RefWorkloadFactory.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(OptimizedNetwork)
+
+BOOST_AUTO_TEST_CASE(SerializeToDot)
+{
+    armnn::Network net;
+
+    // Defines layers.
+    auto input = net.AddInputLayer(0);
+    auto add = net.AddAdditionLayer();
+    auto output = net.AddOutputLayer(0);
+
+    // Connects layers.
+    input->GetOutputSlot(0).Connect(add->GetInputSlot(0));
+    input->GetOutputSlot(0).Connect(add->GetInputSlot(1));
+    add->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    armnn::TensorShape shape({4});
+    armnn::TensorInfo info(shape, armnn::DataType::Float32);
+    input->GetOutputSlot(0).SetTensorInfo(info);
+    add->GetOutputSlot(0).SetTensorInfo(info);
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
+    armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
+
+    std::ostringstream ss;
+    optimizedNet->SerializeToDot(ss);
+
+    auto inputId = input->GetGuid();
+    auto addId = add->GetGuid();
+    auto outputId = output->GetGuid();
+
+    std::stringstream expected;
+    expected <<
+        "digraph Optimized {\n"
+        "    node [shape=\"record\"];\n"
+        "    edge [fontsize=8 fontcolor=\"blue\" fontname=\"arial-bold\"];\n"
+        "    " << inputId << " [label=\"{Input}\"];\n"
+        "    " << addId << " [label=\"{Addition}\"];\n"
+        "    " << outputId << " [label=\"{Output}\"];\n"
+        "    " << inputId << " -> " << addId << " [label=< [4] >];\n"
+        "    " << inputId << " -> " << addId << " [label=< [4] >];\n"
+        "    " << addId << " -> " << outputId << " [label=< [4] >];\n"
+        "}\n";
+
+    BOOST_TEST(ss.str() == expected.str());
+}
+
+BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
+{
+    // Build up the structure of the network.
+    armnn::INetworkPtr net(armnn::INetwork::Create());
+
+    armnn::IConnectableLayer* input = net->AddInputLayer(0);
+
+    // This layer configuration isn't supported by CpuAcc and isn't allowed to fall back, so Optimize will return null.
+    armnn::NormalizationDescriptor descriptor;
+    armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
+
+    armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
+    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+    normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
+    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+    BOOST_CHECK(!optNet);
+}
+
+BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerWithFallback)
+{
+    // Build up the structure of the network.
+    armnn::INetworkPtr net(armnn::INetwork::Create());
+
+    armnn::IConnectableLayer* input = net->AddInputLayer(0);
+
+    // This layer configuration isn't supported by CpuAcc, but it is allowed to fall back to CpuRef.
+    armnn::NormalizationDescriptor descriptor;
+    armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
+
+    armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
+    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+    normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
+    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+    BOOST_REQUIRE(optNet);
+
+    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+    {
+        // If NEON is enabled, the Input and Output layers are supported by CpuAcc,
+        // while the other layers are supported by CpuRef.
+        // If NEON is not enabled, all layers are supported by CpuRef.
+#if ARMCOMPUTENEON_ENABLED
+        if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
+        {
+            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
+        }
+        else if (layer->GetType() == armnn::LayerType::Normalization)
+        {
+            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        }
+#else
+        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+#endif
+    }
+}
+
+BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
+{
+    const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
+
+    armnn::Network net;
+
+    armnn::NormalizationDescriptor nmDesc;
+    armnn::ActivationDescriptor acDesc;
+
+    //    in
+    //     |
+    //    nm
+    //   /  |
+    //  ac  |
+    //   \  |
+    //    ml
+    //     |
+    //    sm
+    //     |
+    //    ot
+    armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
+    layer->GetOutputSlot(0).SetTensorInfo(desc);
+
+    armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
+
+    layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
+    normLayer->GetOutputSlot(0).SetTensorInfo(desc);
+
+    layer = net.AddActivationLayer(acDesc, "ac");
+
+    normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(desc);
+
+    armnn::IConnectableLayer* prevLayer = layer;
+    layer = net.AddMultiplicationLayer("ml");
+
+    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+    normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+    layer->GetOutputSlot(0).SetTensorInfo(desc);
+
+    prevLayer = layer;
+    armnn::SoftmaxDescriptor softmaxDescriptor;
+    layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
+
+    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(desc);
+
+    prevLayer = layer;
+    layer = net.AddOutputLayer(0, "ot");
+
+    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined };
+
+    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
+    BOOST_CHECK(!optNet);
+}
+
+BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback)
+{
+    const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
+
+    armnn::Network net;
+
+    armnn::NormalizationDescriptor nmDesc;
+    armnn::ActivationDescriptor acDesc;
+
+    //    in
+    //     |
+    //    nm
+    //   /  |
+    //  ac  |
+    //   \  |
+    //    ml
+    //     |
+    //    sm
+    //     |
+    //    ot
+    armnn::IConnectableLayer* layer = net.AddInputLayer(0, "in");
+    layer->GetOutputSlot(0).SetTensorInfo(desc);
+
+    armnn::IConnectableLayer* const normLayer = net.AddNormalizationLayer(nmDesc, "nm");
+
+    layer->GetOutputSlot(0).Connect(normLayer->GetInputSlot(0));
+    normLayer->GetOutputSlot(0).SetTensorInfo(desc);
+
+    layer = net.AddActivationLayer(acDesc, "ac");
+
+    normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(desc);
+
+    armnn::IConnectableLayer* prevLayer = layer;
+    layer = net.AddMultiplicationLayer("ml");
+
+    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+    normLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
+    layer->GetOutputSlot(0).SetTensorInfo(desc);
+
+    prevLayer = layer;
+    armnn::SoftmaxDescriptor softmaxDescriptor;
+    layer = net.AddSoftmaxLayer(softmaxDescriptor, "sm");
+
+    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+    layer->GetOutputSlot(0).SetTensorInfo(desc);
+
+    prevLayer = layer;
+    layer = net.AddOutputLayer(0, "ot");
+
+    prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
+
+    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(net, backends, runtime->GetDeviceSpec());
+    BOOST_CHECK(optNet);
+
+    // validate workloads
+    armnn::RefWorkloadFactory fact;
+    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+    {
+        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        BOOST_CHECK_NO_THROW(
+            layer->CreateWorkload(static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph(), fact));
+    }
+}
+
+BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback)
+{
+    // Build up the structure of the network.
+    armnn::INetworkPtr net(armnn::INetwork::Create());
+
+    armnn::IConnectableLayer* input = net->AddInputLayer(0);
+
+    // This layer configuration isn't supported by CpuAcc, but it is allowed to fall back to CpuRef.
+    armnn::NormalizationDescriptor descriptor;
+    armnn::IConnectableLayer* normalize = net->AddNormalizationLayer(descriptor);
+
+    armnn::IConnectableLayer* output = net->AddOutputLayer(0);
+
+    input->GetOutputSlot(0).Connect(normalize->GetInputSlot(0));
+    normalize->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+    input->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+    normalize->GetOutputSlot(0).SetTensorInfo(armnn::TensorInfo({ 1, 1, 4, 4 }, armnn::DataType::Float32));
+
+    armnn::IRuntime::CreationOptions options;
+    armnn::IRuntimePtr runtime(armnn::IRuntime::Create(options));
+
+    std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc,
+                                               armnn::Compute::GpuAcc,
+                                               armnn::Compute::CpuRef };
+
+    armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
+    BOOST_REQUIRE(optNet);
+
+    for (auto&& layer : static_cast<armnn::OptimizedNetwork*>(optNet.get())->GetGraph())
+    {
+        // If NEON is enabled, the Input and Output layers are supported by CpuAcc,
+        // while the other layers are supported by CpuRef.
+        // If only CL is enabled, the Input and Output layers are supported by GpuAcc,
+        // while the other layers are supported by CpuRef.
+        // If neither NEON nor CL is enabled, all layers are supported by CpuRef.
+#if ARMCOMPUTENEON_ENABLED
+        if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
+        {
+            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
+        }
+        else if (layer->GetType() == armnn::LayerType::Normalization)
+        {
+            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        }
+#elif ARMCOMPUTECL_ENABLED
+        if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
+        {
+            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
+        }
+        else if (layer->GetType() == armnn::LayerType::Normalization)
+        {
+            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        }
+#else
+        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+#endif
+    }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/test/PermuteTestImpl.hpp b/src/backends/backendsCommon/test/PermuteTestImpl.hpp
new file mode 100644
index 0000000..529f9d3
--- /dev/null
+++ b/src/backends/backendsCommon/test/PermuteTestImpl.hpp
@@ -0,0 +1,225 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "QuantizeHelper.hpp"
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+template<typename T>
+LayerTestResult<T, 4> SimplePermuteTestImpl(
+        armnn::IWorkloadFactory& workloadFactory,
+        armnn::PermuteDescriptor descriptor,
+        armnn::TensorInfo inputTensorInfo,
+        armnn::TensorInfo outputTensorInfo,
+        const std::vector<T>& inputData,
+        const std::vector<T>& outputExpectedData)
+{
+    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::PermuteQueueDescriptor data;
+    data.m_Parameters = descriptor;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePermute(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+LayerTestResult<float, 4> SimplePermuteFloat32TestCommon(armnn::IWorkloadFactory& workloadFactory)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 1, 2, 2, 2 };
+    unsigned int outputShape[] = { 1, 2, 2, 2 };
+
+    armnn::PermuteDescriptor descriptor;
+    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
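+    // m_DimMappings[i] is the destination dimension of source dimension i, so {0, 3, 1, 2} moves the channel
+    // dimension (1) to the innermost position (3) - an NCHW -> NHWC style reordering, consistent with the
+    // expected output below.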
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    std::vector<float> input = std::vector<float>(
+            {
+                    1.0f, 2.0f,
+                    3.0f, 4.0f,
+
+                    5.0f, 6.0f,
+                    7.0f, 8.0f
+            });
+
+    std::vector<float> outputExpected = std::vector<float>(
+            {
+                    1.0f, 5.0f, 2.0f, 6.0f,
+                    3.0f, 7.0f, 4.0f, 8.0f
+            });
+
+    return SimplePermuteTestImpl<float>(workloadFactory, descriptor, inputTensorInfo,
+                                        outputTensorInfo, input, outputExpected);
+}
+
+LayerTestResult<uint8_t, 4> SimplePermuteUint8TestCommon(armnn::IWorkloadFactory& workloadFactory)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 1, 2, 2, 2 };
+    unsigned int outputShape[] = { 1, 2, 2, 2 };
+
+    armnn::PermuteDescriptor descriptor;
+    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::QuantisedAsymm8);
+    inputTensorInfo.SetQuantizationScale(1.0f);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::QuantisedAsymm8);
+    outputTensorInfo.SetQuantizationScale(1.0f);
+
+    std::vector<uint8_t> input = std::vector<uint8_t>(
+            {
+                    1, 2,
+                    3, 4,
+
+                    5, 6,
+                    7, 8
+            });
+
+    std::vector<uint8_t> outputExpected = std::vector<uint8_t>(
+            {
+                    1, 5, 2, 6,
+                    3, 7, 4, 8
+            });
+
+    return SimplePermuteTestImpl<uint8_t>(workloadFactory, descriptor, inputTensorInfo,
+                                          outputTensorInfo, input, outputExpected);
+}
+
+LayerTestResult<float, 4>
+PermuteFloat32ValueSet1TestCommon(armnn::IWorkloadFactory& workloadFactory)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[]  = { 1, 2, 2, 3 };
+    unsigned int outputShape[] = { 1, 3, 2, 2 };
+
+    armnn::PermuteDescriptor descriptor;
+    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    std::vector<float> input = std::vector<float>(
+            {
+                    1.0f,   2.0f,  3.0f,
+                    11.0f, 12.0f, 13.0f,
+                    21.0f, 22.0f, 23.0f,
+                    31.0f, 32.0f, 33.0f,
+            });
+
+    std::vector<float> outputExpected = std::vector<float>(
+            {
+                    1.0f, 11.0f, 21.0f, 31.0f,
+                    2.0f, 12.0f, 22.0f, 32.0f,
+                    3.0f, 13.0f, 23.0f, 33.0f,
+            });
+
+    return SimplePermuteTestImpl<float>(workloadFactory, descriptor, inputTensorInfo,
+                                        outputTensorInfo, input, outputExpected);
+}
+
+LayerTestResult<float, 4>
+PermuteFloat32ValueSet2TestCommon(armnn::IWorkloadFactory& workloadFactory)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[]  = { 1, 3, 2, 2 };
+    unsigned int outputShape[] = { 1, 2, 2, 3 };
+
+    armnn::PermuteDescriptor descriptor;
+    descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    std::vector<float> input = std::vector<float>(
+            {
+                1.0f, 11.0f, 21.0f, 31.0f,
+                2.0f, 12.0f, 22.0f, 32.0f,
+                3.0f, 13.0f, 23.0f, 33.0f,
+            });
+
+    std::vector<float> outputExpected = std::vector<float>(
+            {
+                1.0f,   2.0f,  3.0f,
+                11.0f, 12.0f, 13.0f,
+                21.0f, 22.0f, 23.0f,
+                31.0f, 32.0f, 33.0f,
+            });
+
+    return SimplePermuteTestImpl<float>(workloadFactory, descriptor, inputTensorInfo,
+                                        outputTensorInfo, input, outputExpected);
+}
+
+LayerTestResult<float, 4>
+PermuteFloat32ValueSet3TestCommon(armnn::IWorkloadFactory& workloadFactory)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[]  = { 1, 2, 3, 3 };
+    unsigned int outputShape[] = { 1, 3, 2, 3 };
+
+    armnn::PermuteDescriptor descriptor;
+    descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    std::vector<float> input = std::vector<float>(
+            {
+                1.0f,   2.0f,  3.0f,
+                11.0f, 12.0f, 13.0f,
+                21.0f, 22.0f, 23.0f,
+                31.0f, 32.0f, 33.0f,
+                41.0f, 42.0f, 43.0f,
+                51.0f, 52.0f, 53.0f,
+            });
+
+    std::vector<float> outputExpected = std::vector<float>(
+            {
+                1.0f, 11.0f, 21.0f, 31.0f, 41.0f, 51.0f,
+                2.0f, 12.0f, 22.0f, 32.0f, 42.0f, 52.0f,
+                3.0f, 13.0f, 23.0f, 33.0f, 43.0f, 53.0f,
+            });
+
+    return SimplePermuteTestImpl<float>(workloadFactory, descriptor, inputTensorInfo,
+                                        outputTensorInfo, input, outputExpected);
+}
diff --git a/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
new file mode 100644
index 0000000..ded45ab
--- /dev/null
+++ b/src/backends/backendsCommon/test/Pooling2dTestImpl.hpp
@@ -0,0 +1,1240 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "QuantizeHelper.hpp"
+
+#include <armnn/ArmNN.hpp>
+
+#include <Permute.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <backendsCommon/WorkloadInfo.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+#include <boost/numeric/conversion/cast.hpp>
+
+#include <algorithm>
+#include <string>
+
+template<typename T>
+LayerTestResult<T, 4> SimplePooling2dTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                              armnn::Pooling2dDescriptor descriptor,
+                                              float qScale,
+                                              int32_t qOffset,
+                                              const boost::multi_array<T, 4>& input,
+                                              const boost::multi_array<T, 4>& outputExpected)
+{
+    const armnn::DataLayoutIndexed dataLayout = descriptor.m_DataLayout;
+    auto heightIndex = dataLayout.GetHeightIndex();
+    auto widthIndex = dataLayout.GetWidthIndex();
+    auto channelsIndex = dataLayout.GetChannelsIndex();
+
+    unsigned int inputHeight     = boost::numeric_cast<unsigned int>(input.shape()[heightIndex]);
+    unsigned int inputWidth      = boost::numeric_cast<unsigned int>(input.shape()[widthIndex]);
+    unsigned int inputChannels   = boost::numeric_cast<unsigned int>(input.shape()[channelsIndex]);
+    unsigned int inputBatchSize  = boost::numeric_cast<unsigned int>(input.shape()[0]);
+
+    unsigned int outputHeight    = boost::numeric_cast<unsigned int>(outputExpected.shape()[heightIndex]);
+    unsigned int outputWidth     = boost::numeric_cast<unsigned int>(outputExpected.shape()[widthIndex]);
+    unsigned int outputChannels  = boost::numeric_cast<unsigned int>(outputExpected.shape()[channelsIndex]);
+    unsigned int outputBatchSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[0]);
+
+    armnn::TensorInfo inputTensorInfo = GetTensorInfo<T>(inputBatchSize, inputChannels, inputHeight,
+                                                         inputWidth, dataLayout);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(outputBatchSize, outputChannels, outputHeight,
+                                                          outputWidth, dataLayout);
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::Pooling2dQueueDescriptor queueDescriptor;
+    queueDescriptor.m_Parameters = descriptor;
+    queueDescriptor.m_Parameters.m_DataLayout = dataLayout;
+
+    armnn::WorkloadInfo workloadInfo;
+    AddInputToWorkload(queueDescriptor, workloadInfo, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(queueDescriptor, workloadInfo, outputTensorInfo, outputHandle.get());
+
+    // Don't execute if Pooling is not supported, as an exception will be raised.
+    armnn::BackendId backend = workloadFactory.GetBackendId();
+    const size_t reasonIfUnsupportedMaxLen = 255;
+    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
+    result.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
+                                                   queueDescriptor.m_Parameters,
+                                                   reasonIfUnsupported, reasonIfUnsupportedMaxLen);
+    if (!result.supported)
+    {
+        return result;
+    }
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(queueDescriptor, workloadInfo);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+
+    result.outputExpected = outputExpected;
+
+    return result;
+}
+
+//
+// Tests max pooling with the following parameters:
+//
+//   Pooling size: 3x3
+//   Stride:       (2,4)
+//   input size:   8x13
+//   channels:     2
+//   batch size:   2
+//
+template<typename T>
+LayerTestResult<T, 4> SimpleMaxPooling2dSize3x3Stride2x4TestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                                   bool forceNoPadding,
+                                                                   float qScale = 1.0f,
+                                                                   int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
+    descriptor.m_StrideX = 2;
+    descriptor.m_StrideY = 4;
+    // forceNoPadding is mainly used for compatibility with ARM Compute.
+    // As of 16/05/2017, ARM Compute reports an error if padX or padY are equal to or greater than the pool size.
+    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
+    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
+    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+
+    unsigned int inputWidth = 8;
+    unsigned int inputHeight = 13;
+    unsigned int outputWidth =
+        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
+        descriptor.m_StrideX;
+    unsigned int outputHeight =
+        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
+        descriptor.m_StrideY;
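+    // For example, with the default padding of 3: outputWidth = (8 + 3 + 3 + 2 - 3) / 2 = 6 and
+    // outputHeight = (13 + 4 - 3) / 4 = 3; with forceNoPadding they become (8 + 2 - 3) / 2 = 3 and 3.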
+    unsigned int channels = 2;
+    unsigned int batchSize = 2;
+
+    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    std::vector<float> singleChannelData({
+        0.0f, 4.0f, 8.0f, 1.0f, 6.0f, 4.0f, 5.0f, 8.0f,
+        1.0f, 1.0f, 6.0f, 0.0f, 3.0f, 7.0f, 4.0f, 7.0f,
+        8.0f, 5.0f, 0.0f, 0.0f, 8.0f, 3.0f, 4.0f, 3.0f,
+        8.0f, 2.0f, 5.0f, 4.0f, 1.0f, 9.0f, 2.0f, 0.0f,
+        5.0f, 4.0f, 5.0f, 0.0f, 0.0f, 0.0f, 7.0f, 2.0f,
+        1.0f, 2.0f, 6.0f, 2.0f, 7.0f, 9.0f, 5.0f, 2.0f,
+        9.0f, 7.0f, 3.0f, 1.0f, 3.0f, 4.0f, 8.0f, 3.0f,
+        1.0f, 0.0f, 0.0f, 5.0f, 5.0f, 4.0f, 2.0f, 0.0f,
+        6.0f, 4.0f, 3.0f, 6.0f, 9.0f, 5.0f, 5.0f, 6.0f,
+        8.0f, 7.0f, 9.0f, 6.0f, 1.0f, 4.0f, 1.0f, 9.0f,
+        7.0f, 1.0f, 9.0f, 2.0f, 9.0f, 9.0f, 8.0f, 1.0f,
+        4.0f, 4.0f, 5.0f, 9.0f, 2.0f, 6.0f, 6.0f, 4.0f,
+        3.0f, 5.0f, 4.0f, 0.0f, 1.0f, 5.0f, 9.0f, 7.0f,
+    });
+
+    // Constructs input data.
+    std::vector<float> inputData;
+    auto negator = [](float f) { return -f; };
+
+    // First image (two channels where the second channel is the negative of the first one).
+    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
+    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
+
+    // Second image (same as first image).
+    inputData.insert(inputData.end(), singleChannelData.begin(), singleChannelData.end());
+    std::transform(singleChannelData.begin(), singleChannelData.end(), std::back_inserter(inputData), negator);
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+
+    // These were calculated manually.
+    auto shape(GetTensorShapeAsArray<4>(outputTensorInfo));
+    boost::multi_array<T, 4> outputExpected(shape);
+    if (forceNoPadding)
+    {
+        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+            QuantizedVector<T>(qScale, qOffset, {
+                 8.0f,  8.0f,  8.0f,
+                 9.0f,  7.0f,  9.0f,
+                 9.0f,  9.0f,  9.0f,
+
+                 0.0f,  0.0f, -3.0f,
+                -1.0f,  0.0f,  0.0f,
+                -1.0f, -1.0f, -1.0f,
+
+                 8.0f,  8.0f,  8.0f,
+                 9.0f,  7.0f,  9.0f,
+                 9.0f,  9.0f,  9.0f,
+
+                 0.0f,  0.0f, -3.0f,
+                -1.0f,  0.0f,  0.0f,
+                -1.0f, -1.0f, -1.0f
+        }));
+    }
+    else
+    {
+        outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+            QuantizedVector<T>(qScale, qOffset, {
+                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
+                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
+                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
+
+                0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
+                0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f,
+
+                0.0f, 8.0f, 8.0f, 8.0f, 8.0f, 8.0f,
+                0.0f, 9.0f, 7.0f, 9.0f, 9.0f, 3.0f,
+                0.0f, 8.0f, 9.0f, 9.0f, 9.0f, 9.0f,
+
+                0.0f, 0.0f, 0.0f, 0.0f,-3.0f, 0.0f,
+                0.0f,-1.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+                0.0f,-1.0f,-1.0f,-1.0f,-1.0f, 0.0f
+        }));
+    }
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> SimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                   const armnn::DataLayoutIndexed& dataLayout = armnn::DataLayout::NCHW,
+                                                   float qScale = 1.0f,
+                                                   int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
+    descriptor.m_StrideX = descriptor.m_StrideY = 2;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+    descriptor.m_DataLayout = dataLayout;
+
+    armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    std::vector<T> inputData(
+        QuantizedVector<T>(qScale, qOffset, {
+             1.0f,  2.0f,  5.0f,  6.0f,
+             3.0f,  4.0f,  7.0f,  8.0f,
+             9.0f, 10.0f, 13.0f, 14.0f,
+            11.0f, 12.0f, 15.0f, 16.0f,
+
+            17.0f, 18.0f, 21.0f, 22.0f,
+            19.0f, 20.0f, 23.0f, 24.0f,
+            25.0f, 26.0f, 29.0f, 30.0f,
+            27.0f, 28.0f, 31.0f, 32.0f,
+        }));
+
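+    // Each expected value is the maximum of a non-overlapping 2x2 window, e.g. max(1, 2, 3, 4) = 4.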
+    std::vector<T> outputData(
+        QuantizedVector<T>(qScale, qOffset, {
+             4.0f,  8.0f,
+            12.0f, 16.0f,
+
+            20.0f, 24.0f,
+            28.0f, 32.0f,
+        }));
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<T> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+
+        std::vector<T> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> SimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                       armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
+                                                       float qScale = 1.0f,
+                                                       int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
+    descriptor.m_StrideX = descriptor.m_StrideY = 2;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+    descriptor.m_DataLayout = dataLayout;
+
+    armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    std::vector<T> inputData(
+        QuantizedVector<T>(qScale, qOffset, {
+             2.0f,  2.0f,  6.0f,  6.0f,
+             4.0f,  4.0f,  8.0f,  8.0f,
+            10.0f, 12.0f, 14.0f, 16.0f,
+            10.0f, 12.0f, 16.0f, 14.0f,
+
+            18.0f, 20.0f, 24.0f, 22.0f,
+            20.0f, 18.0f, 22.0f, 24.0f,
+            26.0f, 28.0f,  0.0f,  0.0f,
+            26.0f, 28.0f,  0.0f,  0.0f,
+        }));
+
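+    // Each expected value is the average of a non-overlapping 2x2 window, e.g. (2 + 2 + 4 + 4) / 4 = 3.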
+    std::vector<T> outputData(
+        QuantizedVector<T>(qScale, qOffset, {
+             3.0f,  7.0f,
+            11.0f, 15.0f,
+
+            19.0f, 23.0f,
+            27.0f,  0.0f,
+        }));
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<T> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+
+        std::vector<T> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> LargeTensorsAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                             float qScale = 1.0f,
+                                                             int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 100;
+    descriptor.m_StrideX = descriptor.m_StrideY = 5;
+    descriptor.m_PadLeft = 50;
+    descriptor.m_PadRight = 50;
+    descriptor.m_PadTop = 50;
+    descriptor.m_PadBottom = 50;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+
+    armnn::TensorInfo inputTensorInfo({ 5, 3, 52, 60 }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({ 5, 3, 11, 13 }, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    std::vector<T> inputVec;
+
+    for (unsigned int i = 0 ; i < inputTensorInfo.GetShape().GetNumElements(); ++i)
+    {
+        inputVec.push_back(1);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, inputVec);
+
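+    // With PaddingMethod::Exclude the padded elements are not counted in the average, so every window of an
+    // all-ones input is expected to average to exactly 1.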
+    std::vector<T> outputVec;
+
+    for (unsigned int i = 0 ; i < outputTensorInfo.GetShape().GetNumElements(); ++i)
+    {
+        outputVec.push_back(1);
+    }
+
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputVec);
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> SimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                  armnn::DataLayoutIndexed dataLayout = armnn::DataLayout::NCHW,
+                                                  float qScale = 1.0f,
+                                                  int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
+    descriptor.m_StrideX = descriptor.m_StrideY = 2;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+    descriptor.m_DataLayout = dataLayout;
+
+    armnn::TensorInfo inputTensorInfo  = GetTensorInfo<T>(1, 2, 4, 4, dataLayout);
+    armnn::TensorInfo outputTensorInfo = GetTensorInfo<T>(1, 2, 2, 2, dataLayout);
+
+    std::vector<T> inputData(
+        QuantizedVector<T>(qScale, qOffset, {
+            1.0f, 7.0f, 5.0f, 5.0f,
+            1.0f, 7.0f, 5.0f, 5.0f,
+            3.0f, 3.0f, 1.0f, 1.0f,
+            3.0f, 3.0f, 1.0f, 1.0f,
+
+            1.0f, 7.0f, 0.0f, 0.0f,
+            1.0f, 7.0f, 2.0f, 0.0f,
+            0.0f, 2.0f, 1.0f, 1.0f,
+            0.0f, 0.0f, 1.0f, 1.0f,
+        }));
+
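+    // L2 pooling takes sqrt(mean of squares) over each 2x2 window, e.g. sqrt((1 + 49 + 1 + 49) / 4) = 5.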
+    std::vector<T> outputData(
+        QuantizedVector<T>(qScale, qOffset, {
+            5.0f, 5.0f,
+            3.0f, 1.0f,
+
+            5.0f, 1.0f,
+            1.0f, 1.0f,
+        }));
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout.GetDataLayout() == armnn::DataLayout::NHWC)
+    {
+        std::vector<T> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data());
+        inputData = tmp;
+
+        std::vector<T> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data());
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputData);
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> L2Pooling2dSize3Stride1TestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                        float qScale = 1.0f,
+                                                        int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
+    descriptor.m_StrideX = descriptor.m_StrideY = 1;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            2.0f, 1.0f, 5.0f, 2.0f,
+            1.0f, 2.0f, 2.0f, 1.0f,
+            5.0f, 4.0f, 1.0f, 5.0f,
+            2.0f, 1.0f, 5.0f, 2.0f,
+        }));
+
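+    // Every 3x3 window of this input has a sum of squares of 81, so each L2-pooled value is sqrt(81 / 9) = 3.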
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            3.0f, 3.0f,
+            3.0f, 3.0f,
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> L2Pooling2dSize3Stride3TestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                        float qScale = 1.0f,
+                                                        int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
+    descriptor.m_StrideX = descriptor.m_StrideY = 3;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
+            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
+            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
+            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
+            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
+            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
+            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
+            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
+            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
+        }));
+
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            3.0f, 3.0f, 3.0f,
+            3.0f, 3.0f, 3.0f,
+            3.0f, 3.0f, 3.0f,
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> L2Pooling2dSize3Stride4TestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                        float qScale = 1.0f,
+                                                        int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
+    descriptor.m_StrideX = descriptor.m_StrideY = 4;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
+            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
+            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
+            0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+            2.0f, 1.0f, 5.0f, 0.0f, 2.0f, 1.0f, 5.0f,
+            1.0f, 2.0f, 2.0f, 0.0f, 1.0f, 2.0f, 2.0f,
+            5.0f, 4.0f, 1.0f, 0.0f, 5.0f, 4.0f, 1.0f,
+        }));
+
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            3.0f, 3.0f,
+            3.0f, 3.0f,
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> L2Pooling2dSize7TestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                 float qScale = 1.0f,
+                                                 int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 7;
+    descriptor.m_StrideX = descriptor.m_StrideY = 7;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 7, 7 }, armnn::GetDataType<T>());
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            1.0f, 0.0f, 2.0f, 0.0f,  3.0f, 0.0f, 4.0f,
+            0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
+            0.0f, 5.0f, 0.0f, 6.0f,  0.0f, 7.0f, 0.0f,
+            8.0f, 0.0f, 9.0f, 0.0f, 10.0f, 0.0f, 5.0f,
+            0.0f, 5.0f, 0.0f, 2.0f,  0.0f, 1.0f, 1.0f,
+            0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
+            0.0f, 0.0f, 0.0f, 0.0f,  0.0f, 0.0f, 0.0f,
+        }));
+
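+    // The single 7x7 window has a sum of squares of 441 over 49 elements, so the L2 pool
+    // yields sqrt(441 / 49) = 3.0.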
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            3.0f,
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> L2Pooling2dSize9TestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                 float qScale = 1.0f,
+                                                 int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 9;
+    descriptor.m_StrideX = descriptor.m_StrideY = 9;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 9, 9 }, armnn::GetDataType<T>());
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
+            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
+            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
+            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
+            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
+            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
+            2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f, 2.0f, 1.0f, 5.0f,
+            1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f, 1.0f, 2.0f, 2.0f,
+            5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f, 5.0f, 4.0f, 1.0f,
+        }));
+
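+    // The single 9x9 window has a sum of squares of 729 over 81 elements, so the L2 pool
+    // yields sqrt(729 / 81) = 3.0.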
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 1, 1 }, armnn::GetDataType<T>());
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            3.0f,
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> AsymmetricNonSquarePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                             float qScale = 1.0f,
+                                                             int32_t qOffset = 0)
+{
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 1, 3 }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
+
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+    descriptor.m_PoolWidth = 2;
+    descriptor.m_PoolHeight = 3;
+    descriptor.m_StrideX = 2;
+    descriptor.m_StrideY = 1;
+    descriptor.m_PadLeft = 2;
+    descriptor.m_PadRight = 0;
+    descriptor.m_PadTop = 1;
+    descriptor.m_PadBottom = 2;
+    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+
+    // Construct input data.
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            1.0f, 3.0f, 4.0f,
+        }));
+
+    // These were calculated manually.
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            0.0f, 3.0f, 0.0f, 3.0f,
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> ComparePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                 armnn::IWorkloadFactory& refWorkloadFactory,
+                                                 armnn::PoolingAlgorithm poolingType,
+                                                 float qScale = 1.0f,
+                                                 int32_t qOffset = 0)
+{
+    const unsigned int inputWidth = 16;
+    const unsigned int inputHeight = 32;
+    const unsigned int channelCount = 2;
+    const unsigned int batchSize = 5;
+
+    const unsigned int poolSize = 3;
+    const unsigned int strideX = 2;
+    const unsigned int strideY = 4;
+    const unsigned int padX = 0;
+    const unsigned int padY = 0;
+
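+    // With OutputShapeRounding::Floor the output spatial size is (in + 2 * pad + stride - pool) / stride,
+    // which here gives a 7x8 (width x height) output.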
+    const unsigned int outputWidth = (inputWidth + 2 * padX + strideX - poolSize) / strideX;
+    const unsigned int outputHeight = (inputHeight + 2 * padY + strideY - poolSize) / strideY;
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { batchSize, channelCount, inputHeight, inputWidth };
+    unsigned int outputShape[] = { batchSize, channelCount, outputHeight, outputWidth };
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::GetDataType<T>());
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    boost::multi_array<T, 4> input = MakeRandomTensor<T, 4>(inputTensorInfo, 81715);
+
+    LayerTestResult<T, 4> comparisonResult(outputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::Pooling2dQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+    data.m_Parameters.m_PoolType = poolingType;
+    data.m_Parameters.m_PoolWidth = poolSize;
+    data.m_Parameters.m_PoolHeight = poolSize;
+    data.m_Parameters.m_StrideX = strideX;
+    data.m_Parameters.m_StrideY = strideY;
+    data.m_Parameters.m_PadLeft = padX;
+    data.m_Parameters.m_PadRight = padX;
+    data.m_Parameters.m_PadTop = padY;
+    data.m_Parameters.m_PadBottom = padY;
+    data.m_Parameters.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
+
+    // Don't execute if Pooling is not supported, as an exception will be raised.
+    armnn::BackendId backend = workloadFactory.GetBackendId();
+    const size_t reasonIfUnsupportedMaxLen = 255;
+    char reasonIfUnsupported[reasonIfUnsupportedMaxLen+1];
+    comparisonResult.supported = armnn::IsPooling2dSupported(backend, inputTensorInfo, outputTensorInfo,
+                                                             data.m_Parameters,
+                                                             reasonIfUnsupported, reasonIfUnsupportedMaxLen);
+    if (!comparisonResult.supported)
+    {
+        return comparisonResult;
+    }
+
+    armnn::Pooling2dQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreatePooling2d(data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreatePooling2d(refData, refInfo);
+
+    outputHandleRef->Allocate();
+    inputHandleRef->Allocate();
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+    workloadRef->Execute();
+
+    CopyDataFromITensorHandle(&comparisonResult.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&comparisonResult.outputExpected[0][0][0][0], outputHandleRef.get());
+
+    return comparisonResult;
+}
+
+//
+// Tests max pooling with the following parameters:
+//
+//   Pooling size: 2x2
+//   Stride:       (2,2)
+//   input size:   4x4
+//   channels:     1
+//   batch size:   1
+//
+template<typename T>
+LayerTestResult<T, 4> SimpleMaxPooling2dSize2x2Stride2x2TestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                                   bool forceNoPadding,
+                                                                   float qScale = 1.0f,
+                                                                   int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
+    descriptor.m_StrideX = 2;
+    descriptor.m_StrideY = 2;
+    descriptor.m_PadLeft = descriptor.m_PadRight = forceNoPadding ? 0 : 3;
+    descriptor.m_PadTop = descriptor.m_PadBottom = 0;
+    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::Exclude;
+
+    unsigned int inputWidth = 4;
+    unsigned int inputHeight = 4;
+    unsigned int outputWidth =
+        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
+        descriptor.m_StrideX;
+    unsigned int outputHeight =
+        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
+        descriptor.m_StrideY;
+    unsigned int channels = 1;
+    unsigned int batchSize = 1;
+
+    std::vector<float> inputData = {
+        510.0f, 222.0f, 780.0f, 654.0f,
+        141.0f, 276.0f,  15.0f, 546.0f,
+        303.0f, 618.0f, 582.0f, 339.0f,
+        438.0f, 564.0f, 573.0f, 402.0f
+    };
+
+    // Note that left and right edges will be 0.f, due to the 2x2 max pooling only accessing zeros here.
+    std::vector<float> expectedOutputDataWithPadding = {
+        0.0f, 510.0f, 780.0f, 654.0f, 0.0f,
+        0.0f, 438.0f, 618.0f, 402.0f, 0.0f
+    };
+
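+    // Without padding each 2x2 window covers one quadrant of the input, e.g. the top-left
+    // window yields max(510, 222, 141, 276) = 510.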
+    std::vector<float> expectedOutputDataNoPadding = {
+        510.0f, 780.0f,
+        618.0f, 582.0f
+    };
+
+    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
+
+    // Scale and offset should match input - we're just calculating maximum values.
+    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
+                         QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+//
+// Tests average pooling with the following parameters:
+//
+//   Pooling size: 3x2
+//   Stride:       (2,2)
+//   input size:   3x2
+//   channels:     1
+//   batch size:   1
+//
+template<typename T>
+LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3x2Stride2x2TestCommon(
+        armnn::IWorkloadFactory& workloadFactory,
+        bool forceNoPadding,
+        float qScale = 1.0f,
+        int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
+    descriptor.m_PoolWidth = 3;
+    descriptor.m_PoolHeight = 2;
+    descriptor.m_StrideX = 2;
+    descriptor.m_StrideY = 2;
+    descriptor.m_PadLeft = (forceNoPadding) ? 0 : 1;
+    descriptor.m_PadRight = descriptor.m_PadLeft;
+    descriptor.m_PadTop = 0;
+    descriptor.m_PadBottom = 0;
+    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
+
+    unsigned int inputWidth = 3;
+    unsigned int inputHeight = 2;
+    unsigned int outputWidth =
+        (inputWidth + descriptor.m_PadLeft + descriptor.m_PadRight + descriptor.m_StrideX - descriptor.m_PoolWidth) /
+        descriptor.m_StrideX;
+    unsigned int outputHeight =
+        (inputHeight + descriptor.m_PadTop + descriptor.m_PadBottom + descriptor.m_StrideY - descriptor.m_PoolHeight) /
+        descriptor.m_StrideY;
+    unsigned int channels = 1;
+    unsigned int batchSize = 1;
+
+    std::vector<float> inputData = {
+        3.0f, 6.0f, 9.0f,
+        12.0f, 15.0f, 18.0f,
+    };
+
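+    // With PaddingMethod::IgnoreValue the zero padding elements are included in the average: the first
+    // 3x2 window covers { 0, 3, 6, 0, 12, 15 } -> 36 / 6 = 6.0 and the second { 6, 9, 0, 15, 18, 0 } -> 8.0.
+    // Without padding a single window covers all six values -> 63 / 6 = 10.5.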
+    std::vector<float> expectedOutputDataWithPadding = {
+        6.0f, 8.0f,
+    };
+
+    std::vector<float> expectedOutputDataNoPadding = {
+        10.5f,
+    };
+
+    armnn::TensorInfo inputTensorInfo({ batchSize, channels, inputHeight, inputWidth }, armnn::GetDataType<T>());
+
+    // Scale and offset should match input - we're just calculating average values.
+    armnn::TensorInfo outputTensorInfo({ batchSize, channels, outputHeight, outputWidth }, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(qScale, qOffset, inputData));
+
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        forceNoPadding ? QuantizedVector<T>(qScale, qOffset, expectedOutputDataNoPadding) :
+                         QuantizedVector<T>(qScale, qOffset, expectedOutputDataWithPadding));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> IgnorePaddingSimpleMaxPooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                            float qScale = 1.0f,
+                                                            int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
+    descriptor.m_StrideX = descriptor.m_StrideY = 2;
+    descriptor.m_PadLeft = 1;
+    descriptor.m_PadRight = 1;
+    descriptor.m_PadTop = 1;
+    descriptor.m_PadBottom = 1;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
+
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            -1.0f, -2.0f,  3.0f,  4.0f,
+            -1.0f, -2.0f,  3.0f,  4.0f,
+             1.0f,  2.0f, -3.0f, -4.0f,
+             1.0f,  2.0f, -3.0f, -4.0f,
+        }));
+
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            -1.0f,  3.0f,  4.0f,
+             1.0f,  3.0f,  4.0f,
+             1.0f,  2.0f, -4.0f,
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> IgnorePaddingMaxPooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                            float qScale = 1.0f,
+                                                            int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
+    descriptor.m_StrideX = descriptor.m_StrideY = 1;
+    descriptor.m_PadLeft = 1;
+    descriptor.m_PadRight = 1;
+    descriptor.m_PadTop = 1;
+    descriptor.m_PadBottom = 1;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
+
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            -1.0f, -2.0f,  3.0f,  4.0f,
+            -1.0f, -2.0f,  3.0f,  4.0f,
+             1.0f,  2.0f, -3.0f, -4.0f,
+             1.0f,  2.0f, -3.0f, -4.0f,
+        }));
+
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            -1.0f,  3.0f,  4.0f,  4.0f,
+             2.0f,  3.0f,  4.0f,  4.0f,
+             2.0f,  3.0f,  4.0f,  4.0f,
+             2.0f,  2.0f,  2.0f, -3.0f,
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                                 float qScale = 1.0f,
+                                                                 int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
+    descriptor.m_StrideX = descriptor.m_StrideY = 2;
+    descriptor.m_PadLeft = 1;
+    descriptor.m_PadRight = 1;
+    descriptor.m_PadTop = 1;
+    descriptor.m_PadBottom = 1;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
+
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            12.0f, 20.0f, 32.0f, 40.0f,
+            12.0f, 20.0f, 32.0f, 40.0f,
+            12.0f, 20.0f, 32.0f, 40.0f,
+            12.0f, 20.0f, 32.0f, 40.0f,
+        }));
+
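+    // With PaddingMethod::IgnoreValue the padding contributes zeros and is counted in the divisor, so the
+    // top-left window covers only input[0][0] = 12 plus three padding zeros, giving 12 / 4 = 3.0, and the
+    // centre window gives (20 + 32 + 20 + 32) / 4 = 26.0.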
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            3.0f,  13.0f,  10.0f,
+            6.0f,  26.0f,  20.0f,
+            3.0f,  13.0f,  10.0f,
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> IgnorePaddingSimpleAveragePooling2dNoPaddingTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                                 float qScale = 1.0f,
+                                                                 int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
+    descriptor.m_StrideX = descriptor.m_StrideY = 2;
+    descriptor.m_PadLeft = 0;
+    descriptor.m_PadRight = 0;
+    descriptor.m_PadTop = 0;
+    descriptor.m_PadBottom = 0;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
+    descriptor.m_OutputShapeRounding = armnn::OutputShapeRounding::Ceiling;
+
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4}, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 2, 2 }, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            1.0f, 2.0f, 3.0f, 4.0f,
+            1.0f, 2.0f, 3.0f, 4.0f,
+            1.0f, 2.0f, 3.0f, 4.0f,
+            1.0f, 2.0f, 3.0f, 4.0f,
+        }));
+
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            2.0f, 3.5f,
+            2.0f, 3.5f
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> IgnorePaddingAveragePooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                                float qScale = 1.0f,
+                                                                int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::Average;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
+    descriptor.m_StrideX = descriptor.m_StrideY = 1;
+    descriptor.m_PadLeft = 1;
+    descriptor.m_PadRight = 1;
+    descriptor.m_PadTop = 1;
+    descriptor.m_PadBottom = 1;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
+
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            9.0f,   27.0f,  18.0f,  36.0f,
+            18.0f,   9.0f,  18.0f,   9.0f,
+            27.0f,  18.0f,   9.0f,  27.0f,
+            9.0f,   27.0f,   9.0f,  18.0f,
+        }));
+
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+             7.0f,  11.0f,  13.0f, 9.0f,
+            12.0f,  17.0f,  19.0f, 13.0f,
+            12.0f,  16.0f,  16.0f, 10.0f,
+             9.0f,  11.0f,  12.0f, 7.0f,
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> IgnorePaddingSimpleL2Pooling2dTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                            float qScale = 1.0f,
+                                                            int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 2;
+    descriptor.m_StrideX = descriptor.m_StrideY = 2;
+    descriptor.m_PadLeft = 1;
+    descriptor.m_PadRight = 1;
+    descriptor.m_PadTop = 1;
+    descriptor.m_PadBottom = 1;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
+
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 3, 3 }, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            2.0f,  4.0f, 8.0f, 16.0f,
+            4.0f,  2.0f, 2.0f, 4.0f,
+            8.0f,  2.0f, 4.0f, 2.0f,
+            16.0f, 2.0f, 2.0f, 8.0f,
+        }));
+
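+    // L2 pooling takes the square root of the mean of the squared values; with PaddingMethod::IgnoreValue
+    // the padding contributes zeros, e.g. the top-left window gives sqrt(2^2 / 4) = 1.0 and the centre
+    // window { 2, 2, 2, 4 } gives sqrt(28 / 4) = sqrt(7).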
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+               1.0f,     4.4721f,   8.0f,
+            4.4721f,     2.6457f,   2.236f,
+               8.0f,     1.4142f,   4.0f,
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
+
+template<typename T>
+LayerTestResult<T, 4> IgnorePaddingL2Pooling2dSize3TestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                           float qScale = 1.0f,
+                                                           int32_t qOffset = 0)
+{
+    armnn::Pooling2dDescriptor descriptor;
+    descriptor.m_PoolType = armnn::PoolingAlgorithm::L2;
+    descriptor.m_PoolWidth = descriptor.m_PoolHeight = 3;
+    descriptor.m_StrideX = descriptor.m_StrideY = 1;
+    descriptor.m_PadLeft = 1;
+    descriptor.m_PadRight = 1;
+    descriptor.m_PadTop = 1;
+    descriptor.m_PadBottom = 1;
+    descriptor.m_PaddingMethod = armnn::PaddingMethod::IgnoreValue;
+
+    armnn::TensorInfo inputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo({ 1, 1, 4, 4 }, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo.SetQuantizationScale(qScale);
+        outputTensorInfo.SetQuantizationOffset(qOffset);
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            1.0f, 2.0f, 3.0f, 4.0f,
+            1.0f, 2.0f, 3.0f, 4.0f,
+            1.0f, 2.0f, 3.0f, 4.0f,
+            1.0f, 2.0f, 3.0f, 4.0f,
+        }));
+
+    auto outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+        QuantizedVector<T>(qScale, qOffset, {
+            1.0540f, 1.7638f, 2.5385f, 2.3570f,
+            1.2909f, 2.1602f, 3.1091f, 2.8867f,
+            1.2909f, 2.1602f, 3.1091f, 2.8867f,
+            1.0540f, 1.7638f, 2.5385f, 2.3570f,
+        }));
+
+    return SimplePooling2dTestImpl<T>(workloadFactory, descriptor, qScale, qOffset, input, outputExpected);
+}
diff --git a/src/backends/backendsCommon/test/QuantizeHelper.hpp b/src/backends/backendsCommon/test/QuantizeHelper.hpp
new file mode 100644
index 0000000..bb4e561
--- /dev/null
+++ b/src/backends/backendsCommon/test/QuantizeHelper.hpp
@@ -0,0 +1,91 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <initializer_list>
+#include <iterator>
+#include <vector>
+#include <boost/core/ignore_unused.hpp>
+
+template<typename T, bool DoQuantize=true>
+struct SelectiveQuantizer
+{
+    static T Quantize(float value, float scale, int32_t offset)
+    {
+        return armnn::Quantize<T>(value, scale, offset);
+    }
+
+    static float Dequantize(T value, float scale, int32_t offset)
+    {
+        return armnn::Dequantize(value, scale, offset);
+    }
+};
+
+template<typename T>
+struct SelectiveQuantizer<T, false>
+{
+    static T Quantize(float value, float scale, int32_t offset)
+    {
+        boost::ignore_unused(scale, offset);
+        return value;
+    }
+
+    static float Dequantize(T value, float scale, int32_t offset)
+    {
+        boost::ignore_unused(scale, offset);
+        return value;
+    }
+};
+
+template<typename T>
+T SelectiveQuantize(float value, float scale, int32_t offset)
+{
+    return SelectiveQuantizer<T, armnn::IsQuantizedType<T>()>::Quantize(value, scale, offset);
+}
+
+template<typename T>
+float SelectiveDequantize(T value, float scale, int32_t offset)
+{
+    return SelectiveQuantizer<T, armnn::IsQuantizedType<T>()>::Dequantize(value, scale, offset);
+}
+
+template<typename ItType>
+struct IsFloatingPointIterator
+{
+    static constexpr bool value = std::is_floating_point<typename std::iterator_traits<ItType>::value_type>::value;
+};
+
+template <typename T, typename FloatIt,
+typename std::enable_if<IsFloatingPointIterator<FloatIt>::value, int>::type=0 // Makes sure fp iterator is valid.
+>
+std::vector<T> QuantizedVector(float qScale, int32_t qOffset, FloatIt first, FloatIt last)
+{
+    std::vector<T> quantized;
+    quantized.reserve(boost::numeric_cast<size_t>(std::distance(first, last)));
+
+    for (auto it = first; it != last; ++it)
+    {
+        auto f = *it;
+        T q = SelectiveQuantize<T>(f, qScale, qOffset);
+        quantized.push_back(q);
+    }
+
+    return quantized;
+}
+
+template<typename T>
+std::vector<T> QuantizedVector(float qScale, int32_t qOffset, const std::vector<float>& array)
+{
+    return QuantizedVector<T>(qScale, qOffset, array.begin(), array.end());
+}
+
+template<typename T>
+std::vector<T> QuantizedVector(float qScale, int32_t qOffset, std::initializer_list<float> array)
+{
+    return QuantizedVector<T>(qScale, qOffset, array.begin(), array.end());
+}
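+
+// Illustrative example (assuming armnn::Quantize rounds value / scale and adds the offset):
+// QuantizedVector<uint8_t>(0.5f, 10, { 1.0f, 2.0f }) yields { 12, 14 }, while for a non-quantized T
+// such as float the values pass through unchanged.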
diff --git a/src/backends/backendsCommon/test/ReshapeTestImpl.hpp b/src/backends/backendsCommon/test/ReshapeTestImpl.hpp
new file mode 100644
index 0000000..fee992d
--- /dev/null
+++ b/src/backends/backendsCommon/test/ReshapeTestImpl.hpp
@@ -0,0 +1,177 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "QuantizeHelper.hpp"
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+template<typename T>
+LayerTestResult<T, 4> SimpleReshapeTestImpl(
+    armnn::IWorkloadFactory& workloadFactory,
+    armnn::TensorInfo inputTensorInfo,
+    armnn::TensorInfo outputTensorInfo,
+    const std::vector<T>& inputData,
+    const std::vector<T>& outputExpectedData)
+{
+    auto input = MakeTensor<T, 4>(inputTensorInfo, inputData);
+
+    LayerTestResult<T, 4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<T, 4>(outputTensorInfo, outputExpectedData);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ReshapeQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateReshape(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+LayerTestResult<float, 4> SimpleReshapeFloat32Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 2, 2, 3, 3 };
+    unsigned int outputShape[] = { 2, 2, 9, 1 };
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    std::vector<float> input = std::vector<float>(
+    {
+        0.0f, 1.0f, 2.0f,
+        3.0f, 4.0f, 5.0f,
+        6.0f, 7.0f, 8.0f,
+
+        9.0f, 10.0f, 11.0f,
+        12.0f, 13.0f, 14.0f,
+        15.0f, 16.0f, 17.0f,
+
+        18.0f, 19.0f, 20.0f,
+        21.0f, 22.0f, 23.0f,
+        24.0f, 25.0f, 26.0f,
+
+        27.0f, 28.0f, 29.0f,
+        30.0f, 31.0f, 32.0f,
+        33.0f, 34.0f, 35.0f,
+    });
+
+    std::vector<float> outputExpected = std::vector<float>(
+    {
+        0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f,
+
+        9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
+
+        18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f, 26.0f,
+
+        27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
+    });
+
+    return SimpleReshapeTestImpl<float>(workloadFactory, inputTensorInfo, outputTensorInfo, input, outputExpected);
+}
+
+LayerTestResult<float, 4> SimpleFloorTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(inputTensorInfo);
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo,
+        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+          1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
+
+    LayerTestResult<float, 4> ret(outputTensorInfo);
+    ret.outputExpected = MakeTensor<float, 4>(outputTensorInfo,
+        { -38.0f, -16.0f, -9.0f, -2.0f, -2.0f, -2.0f, -1.0f, -1.0f, 0.0f,
+          1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 2.0f, 8.0f, 15.0f, 37.0f });
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::FloorQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateFloor(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+
+    return ret;
+}
+
+LayerTestResult<uint8_t, 4> SimpleReshapeUint8Test(armnn::IWorkloadFactory& workloadFactory)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 2, 2, 3, 3 };
+    unsigned int outputShape[] = { 2, 2, 9, 1 };
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::QuantisedAsymm8);
+    inputTensorInfo.SetQuantizationScale(1.0f);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::QuantisedAsymm8);
+    outputTensorInfo.SetQuantizationScale(1.0f);
+
+    std::vector<uint8_t> input = std::vector<uint8_t>(
+    {
+        0, 1, 2,
+        3, 4, 5,
+        6, 7, 8,
+
+        9, 10, 11,
+        12, 13, 14,
+        15, 16, 17,
+
+        18, 19, 20,
+        21, 22, 23,
+        24, 25, 26,
+
+        27, 28, 29,
+        30, 31, 32,
+        33, 34, 35,
+    });
+
+    std::vector<uint8_t> outputExpected = std::vector<uint8_t>(
+    {
+        0, 1, 2, 3, 4, 5, 6, 7, 8,
+
+        9, 10, 11, 12, 13, 14, 15, 16, 17,
+
+        18, 19, 20, 21, 22, 23, 24, 25, 26,
+
+        27, 28, 29, 30, 31, 32, 33, 34, 35,
+    });
+
+    return SimpleReshapeTestImpl<uint8_t>(workloadFactory, inputTensorInfo, outputTensorInfo, input, outputExpected);
+}
diff --git a/src/backends/backendsCommon/test/RuntimeTestImpl.hpp b/src/backends/backendsCommon/test/RuntimeTestImpl.hpp
new file mode 100644
index 0000000..b446fc4
--- /dev/null
+++ b/src/backends/backendsCommon/test/RuntimeTestImpl.hpp
@@ -0,0 +1,43 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+
+#include <Runtime.hpp>
+
+namespace
+{
+
+inline void CreateAndDropDummyNetwork(const std::vector<armnn::BackendId>& backends, armnn::Runtime& runtime)
+{
+    armnn::NetworkId networkIdentifier;
+    {
+        armnn::TensorInfo inputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
+        armnn::TensorInfo outputTensorInfo(armnn::TensorShape({ 7, 7 }), armnn::DataType::Float32);
+
+        armnn::INetworkPtr network(armnn::INetwork::Create());
+
+        armnn::IConnectableLayer* input = network->AddInputLayer(0, "input");
+        armnn::IConnectableLayer* layer = network->AddActivationLayer(armnn::ActivationDescriptor(), "test");
+        armnn::IConnectableLayer* output = network->AddOutputLayer(0, "output");
+
+        input->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
+        layer->GetOutputSlot(0).Connect(output->GetInputSlot(0));
+
+        // Sets the tensors in the network.
+        input->GetOutputSlot(0).SetTensorInfo(inputTensorInfo);
+        layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+        // optimize the network
+        armnn::IOptimizedNetworkPtr optNet = Optimize(*network, backends, runtime.GetDeviceSpec());
+
+        runtime.LoadNetwork(networkIdentifier, std::move(optNet));
+    }
+
+    runtime.UnloadNetwork(networkIdentifier);
+}
+
+} // anonymous namespace
diff --git a/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp b/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp
new file mode 100644
index 0000000..1e145a1
--- /dev/null
+++ b/src/backends/backendsCommon/test/SoftmaxTestImpl.hpp
@@ -0,0 +1,153 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include "QuantizeHelper.hpp"
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+#include <armnn/TypesUtils.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+#include <algorithm>
+
+template<typename T>
+LayerTestResult<T, 2> SimpleSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory, float beta)
+{
+    using std::exp;
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 2, 4 };
+
+    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+    float qScale = 1.f / 256.f;
+    int qOffset = 0;
+    inputTensorInfo.SetQuantizationScale(qScale);
+    inputTensorInfo.SetQuantizationOffset(qOffset);
+
+    outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+    outputTensorInfo.SetQuantizationScale(qScale);
+    outputTensorInfo.SetQuantizationOffset(qOffset);
+
+    LayerTestResult<T, 2> ret(outputTensorInfo);
+
+    // Each row is independently softmax'd.
+    auto input = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(
+        QuantizedVector<T>(qScale, 0, {
+            0.f, 1.f, 0.f, 0.f,
+            .5f, 0.f, 0.f, 0.f,
+        })));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::SoftmaxQueueDescriptor data;
+    data.m_Parameters.m_Beta = beta;
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+
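+    // The reference values mirror a numerically stable softmax: the row maximum (1.0 for the first row,
+    // 0.5 for the second) is subtracted before exponentiation, which leaves the resulting ratios unchanged.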
+    float x0[4] = { exp((0.f - 1.0f) * beta), exp((1.0f - 1.0f) * beta),
+        exp((0.0f - 1.0f) * beta), exp((0.0f - 1.0f) * beta) };
+    float sum0 = x0[0] + x0[1] + x0[2] + x0[3];
+    float x1[4] = { exp((0.5f - 0.5f) * beta), exp((0.0f - 0.5f) * beta),
+        exp((0.0f - 0.5f) * beta), exp((0.0f - 0.5f) * beta) };
+    float sum1 = x1[0] + x1[1] + x1[2] + x1[3];
+
+    ret.outputExpected = MakeTensor<T, 2>(outputTensorInfo, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+        x0[0] / sum0, x0[1] / sum0, x0[2] / sum0, x0[3] / sum0,
+        x1[0] / sum1, x1[1] / sum1, x1[2] / sum1, x1[3] / sum1
+        })));
+
+    return ret;
+}
+
+template<typename T>
+LayerTestResult<T, 2> CompareSoftmaxTestImpl(armnn::IWorkloadFactory& workloadFactory,
+    armnn::IWorkloadFactory& refWorkloadFactory,
+    float beta)
+{
+
+    const int batchSize = 20;
+    const int channels = 30;
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { batchSize, channels };
+
+    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+    outputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::GetDataType<T>());
+    float qScale = 1.f / 256.f;
+    int qOffset = 0;
+    inputTensorInfo.SetQuantizationScale(qScale);
+    inputTensorInfo.SetQuantizationOffset(qOffset);
+    outputTensorInfo.SetQuantizationScale(qScale);
+    outputTensorInfo.SetQuantizationOffset(qOffset);
+
+    LayerTestResult<T, 2> ret(outputTensorInfo);
+    auto input = MakeRandomTensor<T, 2>(inputTensorInfo, 0xF00D, 0.0f, 1.0f);
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::SoftmaxQueueDescriptor data;
+    data.m_Parameters.m_Beta = beta;
+
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandleRef = refWorkloadFactory.CreateTensorHandle(outputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> inputHandleRef = refWorkloadFactory.CreateTensorHandle(inputTensorInfo);
+
+    armnn::SoftmaxQueueDescriptor refData = data;
+    armnn::WorkloadInfo refInfo = info;
+    SetWorkloadInput(refData, refInfo, 0, inputTensorInfo, inputHandleRef.get());
+    SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSoftmax(data, info);
+    std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateSoftmax(refData, refInfo);
+
+    outputHandleRef->Allocate();
+    inputHandleRef->Allocate();
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0]);
+    CopyDataToITensorHandle(inputHandleRef.get(), &input[0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+    refWorkloadFactory.Finalize();
+    workloadRef->Execute();
+
+    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
+    CopyDataFromITensorHandle(&ret.outputExpected[0][0], outputHandleRef.get());
+
+    return ret;
+}
diff --git a/src/backends/backendsCommon/test/SplitterTestImpl.hpp b/src/backends/backendsCommon/test/SplitterTestImpl.hpp
new file mode 100644
index 0000000..677950c
--- /dev/null
+++ b/src/backends/backendsCommon/test/SplitterTestImpl.hpp
@@ -0,0 +1,304 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/ArmNN.hpp>
+#include <armnn/Tensor.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/WorkloadFactory.hpp>
+#include <backendsCommon/test/QuantizeHelper.hpp>
+
+#include <test/TensorHelpers.hpp>
+
+template<typename T>
+std::vector<LayerTestResult<T,3>> SplitterTestCommon(armnn::IWorkloadFactory& workloadFactory,
+                                                     float qScale = 0.0f,
+                                                     int32_t qOffset = 0)
+{
+    unsigned int inputWidth = 5;
+    unsigned int inputHeight = 6;
+    unsigned int inputChannels = 3;
+
+    // NOTE: Compute Library imposes a restriction that the x and y dimensions (input height and width)
+    //       cannot be split.
+    //       For the reasoning behind this, see the first comment on https://jira.arm.com/browse/IVGCVSW-1239
+    //
+    // This test has therefore been recast to split the channels, then split the resulting subtensor.
+
+    // To take channel 0 of the original input
+    // and channel 0 and channel 1 of the split subtensor.
+    unsigned int outputWidth1 = inputWidth;
+    unsigned int outputHeight1 = inputHeight;
+    unsigned int outputChannels1 = 1;
+
+    // To take channels 1 and 2 of the original input.
+    unsigned int outputWidth2 = inputWidth;
+    unsigned int outputHeight2 = inputHeight;
+    unsigned int outputChannels2 = 2;
+
+    // Define the tensor descriptors.
+    armnn::TensorInfo inputTensorInfo({ inputChannels, inputHeight, inputWidth }, armnn::GetDataType<T>());
+
+    // Outputs of the original split.
+    armnn::TensorInfo outputTensorInfo1({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo2({ outputChannels2, outputHeight2, outputWidth2 }, armnn::GetDataType<T>());
+
+    // Outputs of the subsequent subtensor split.
+    armnn::TensorInfo outputTensorInfo3({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
+    armnn::TensorInfo outputTensorInfo4({ outputChannels1, outputHeight1, outputWidth1 }, armnn::GetDataType<T>());
+
+    // Set quantization parameters if the requested type is a quantized type.
+    // The quantization doesn't really matter as the splitter operator doesn't dequantize/quantize.
+    if(armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(qScale);
+        inputTensorInfo.SetQuantizationOffset(qOffset);
+        outputTensorInfo1.SetQuantizationScale(qScale);
+        outputTensorInfo1.SetQuantizationOffset(qOffset);
+        outputTensorInfo2.SetQuantizationScale(qScale);
+        outputTensorInfo2.SetQuantizationOffset(qOffset);
+        outputTensorInfo3.SetQuantizationScale(qScale);
+        outputTensorInfo3.SetQuantizationOffset(qOffset);
+        outputTensorInfo4.SetQuantizationScale(qScale);
+        outputTensorInfo4.SetQuantizationOffset(qOffset);
+    }
+
+    LayerTestResult<T,3> ret1(outputTensorInfo1);
+    LayerTestResult<T,3> ret2(outputTensorInfo2);
+    LayerTestResult<T,3> ret3(outputTensorInfo3);
+    LayerTestResult<T,3> ret4(outputTensorInfo4);
+
+    auto input = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
+            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
+            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
+            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
+            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
+
+            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
+            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
+            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
+            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
+            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
+            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
+
+            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
+            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
+            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
+            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
+            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
+            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
+        })
+    ));
+
+    // Channel 0 of the original input.
+    ret1.outputExpected = MakeTensor<T, 3>(outputTensorInfo1, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
+            6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+            11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
+            16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
+            21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
+            26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
+        })
+    ));
+
+    // Channel 1 & 2 of the original input.
+    ret2.outputExpected = MakeTensor<T, 3>(outputTensorInfo2, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
+            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
+            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
+            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
+            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
+            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
+
+            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
+            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
+            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
+            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
+            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
+            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
+        })
+    ));
+
+    // Channel 0 of return 2 (i.e. channel 1 of the original input).
+    ret3.outputExpected = MakeTensor<T, 3>(outputTensorInfo3, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
+            36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
+            41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
+            46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
+            51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
+            56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
+        })
+    ));
+
+    // Channel 1 of return 2.
+    ret4.outputExpected = MakeTensor<T, 3>(outputTensorInfo4, std::vector<T>(
+        QuantizedVector<T>(qScale, qOffset, {
+            61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
+            66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
+            71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
+            76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
+            81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
+            86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
+        })
+    ));
+
+    // NOTE: As a corollary of the restriction on splitting the x and y dimensions, the x and y values of
+    //       the view origins have to be zero. The coordinates follow the tensor info above: channels,
+    //       height/y, width/x. Note that under the hood the compute engine reverses these, i.e. its
+    //       coordinate system is x, y, channels.
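+    //       The first coordinate of each origin below is the starting channel of that view within its input.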
+    std::vector<unsigned int> wOrigin1 = {0, 0, 0}; //Extent of the window is defined by size of output[0].
+    armnn::SplitterQueueDescriptor::ViewOrigin window1(wOrigin1);
+
+    std::vector<unsigned int> wOrigin2 = {1, 0, 0}; //Extent of the window is defined by size of output[1].
+    armnn::SplitterQueueDescriptor::ViewOrigin window2(wOrigin2);
+
+    std::vector<unsigned int> wOrigin3 = {0, 0, 0}; //Extent of the window is defined by size of output[2].
+    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);
+
+    std::vector<unsigned int> wOrigin4 = {1, 0, 0}; //Extent of the window is defined by size of output[3].
+    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);
+
+    bool subTensorsSupported = workloadFactory.SupportsSubTensors();
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle  = workloadFactory.CreateTensorHandle(inputTensorInfo);
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandle1 =
+        subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo1.GetShape(), wOrigin1.data()) :
+            workloadFactory.CreateTensorHandle(outputTensorInfo1);
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandle2 =
+        subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*inputHandle, outputTensorInfo2.GetShape(), wOrigin2.data()) :
+            workloadFactory.CreateTensorHandle(outputTensorInfo2);
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandle3 =
+        subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo3.GetShape(), wOrigin3.data()) :
+            workloadFactory.CreateTensorHandle(outputTensorInfo3);
+
+    std::unique_ptr<armnn::ITensorHandle> outputHandle4 =
+        subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*outputHandle2, outputTensorInfo4.GetShape(), wOrigin4.data()) :
+            workloadFactory.CreateTensorHandle(outputTensorInfo4);
+
+    // Do the first split
+    armnn::SplitterQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, outputTensorInfo1, outputHandle1.get());
+    AddOutputToWorkload(data, info, outputTensorInfo2, outputHandle2.get());
+
+    data.m_ViewOrigins.push_back(window1);
+    data.m_ViewOrigins.push_back(window2);
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
+
+    inputHandle->Allocate();
+    outputHandle1->Allocate();
+    outputHandle2->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);
+
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&ret1.output[0][0][0], outputHandle1.get());
+    CopyDataFromITensorHandle(&ret2.output[0][0][0], outputHandle2.get());
+
+    // Do the second split.
+    armnn::SplitterQueueDescriptor data2;
+    armnn::WorkloadInfo info2;
+    AddInputToWorkload(data2, info2, outputTensorInfo2, outputHandle2.get());
+    AddOutputToWorkload(data2, info2, outputTensorInfo3, outputHandle3.get());
+    AddOutputToWorkload(data2, info2, outputTensorInfo4, outputHandle4.get());
+
+    data2.m_ViewOrigins.push_back(window3);
+    data2.m_ViewOrigins.push_back(window4);
+
+    std::unique_ptr<armnn::IWorkload> workload2 = workloadFactory.CreateSplitter(data2, info2);
+
+    outputHandle3->Allocate();
+    outputHandle4->Allocate();
+
+    workload2->Execute();
+
+    CopyDataFromITensorHandle(&ret3.output[0][0][0], outputHandle3.get());
+    CopyDataFromITensorHandle(&ret4.output[0][0][0], outputHandle4.get());
+
+    std::vector<LayerTestResult<T,3>> ret = {ret1, ret2, ret3, ret4,};
+
+    return ret;
+}
+
+template <typename T>
+LayerTestResult<T, 3> CopyViaSplitterTestImpl(armnn::IWorkloadFactory& workloadFactory, float qScale, int32_t qOffset)
+{
+    const armnn::TensorInfo tensorInfo({ 3, 6, 5 }, armnn::GetDataType<T>());
+    auto input = MakeTensor<T, 3>(tensorInfo, QuantizedVector<T>(qScale, qOffset,
+                                                                 {
+                                                                     1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
+                                                                     6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
+                                                                     11.0f, 12.0f, 13.0f, 14.0f, 15.0f,
+                                                                     16.0f, 17.0f, 18.0f, 19.0f, 20.0f,
+                                                                     21.0f, 22.0f, 23.0f, 24.0f, 25.0f,
+                                                                     26.0f, 27.0f, 28.0f, 29.0f, 30.0f,
+
+                                                                     31.0f, 32.0f, 33.0f, 34.0f, 35.0f,
+                                                                     36.0f, 37.0f, 38.0f, 39.0f, 40.0f,
+                                                                     41.0f, 42.0f, 43.0f, 44.0f, 45.0f,
+                                                                     46.0f, 47.0f, 48.0f, 49.0f, 50.0f,
+                                                                     51.0f, 52.0f, 53.0f, 54.0f, 55.0f,
+                                                                     56.0f, 57.0f, 58.0f, 59.0f, 60.0f,
+
+                                                                     61.0f, 62.0f, 63.0f, 64.0f, 65.0f,
+                                                                     66.0f, 67.0f, 68.0f, 69.0f, 70.0f,
+                                                                     71.0f, 72.0f, 73.0f, 74.0f, 75.0f,
+                                                                     76.0f, 77.0f, 78.0f, 79.0f, 80.0f,
+                                                                     81.0f, 82.0f, 83.0f, 84.0f, 85.0f,
+                                                                     86.0f, 87.0f, 88.0f, 89.0f, 90.0f,
+                                                                 }));
+
+    std::vector<unsigned int> origin = { 0, 0, 0 };
+    armnn::SplitterQueueDescriptor::ViewOrigin window(origin);
+
+    const bool subTensorsSupported = workloadFactory.SupportsSubTensors();
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(tensorInfo);
+
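+    // Where the backend supports sub-tensors, the output view can alias the input handle directly;
+    // otherwise a separate tensor handle is allocated for the output.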
+    std::unique_ptr<armnn::ITensorHandle> outputHandle =
+        subTensorsSupported ?
+            workloadFactory.CreateSubTensorHandle(*inputHandle, tensorInfo.GetShape(), origin.data()) :
+            workloadFactory.CreateTensorHandle(tensorInfo);
+
+    armnn::SplitterQueueDescriptor data;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(data, info, tensorInfo, inputHandle.get());
+    AddOutputToWorkload(data, info, tensorInfo, outputHandle.get());
+
+    data.m_ViewOrigins.push_back(window);
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateSplitter(data, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0]);
+
+    workload->Execute();
+
+    LayerTestResult<T, 3> ret(tensorInfo);
+    CopyDataFromITensorHandle(&ret.output[0][0][0], outputHandle.get());
+    ret.outputExpected = input;
+
+    return ret;
+}
diff --git a/src/backends/backendsCommon/test/TensorCopyUtils.cpp b/src/backends/backendsCommon/test/TensorCopyUtils.cpp
new file mode 100644
index 0000000..acc28c9
--- /dev/null
+++ b/src/backends/backendsCommon/test/TensorCopyUtils.cpp
@@ -0,0 +1,161 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "TensorCopyUtils.hpp"
+
+#include <Half.hpp>
+
+#ifdef ARMCOMPUTECL_ENABLED
+#include <cl/ClTensorHandle.hpp>
+#endif
+
+#if ARMCOMPUTENEON_ENABLED
+#include <neon/NeonTensorHandle.hpp>
+#endif
+
+#if ARMCOMPUTECL_ENABLED || ARMCOMPUTENEON_ENABLED
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#endif
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+#include <boost/cast.hpp>
+
+#include <algorithm>
+#include <cstring>
+
+void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
+{
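+    // Dispatch on the concrete tensor handle type: CPU handles are filled with a plain memcpy,
+    // while CL and NEON handles go through the Arm Compute copy helpers (CL handles are mapped for the copy).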
+    switch (tensorHandle->GetType())
+    {
+        case armnn::ITensorHandle::Cpu:
+        {
+            auto handle = boost::polymorphic_downcast<armnn::ScopedCpuTensorHandle*>(tensorHandle);
+            memcpy(handle->GetTensor<void>(), mem, handle->GetTensorInfo().GetNumBytes());
+            break;
+        }
+#ifdef ARMCOMPUTECL_ENABLED
+        case armnn::ITensorHandle::CL:
+        {
+            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
+            auto handle = boost::polymorphic_downcast<armnn::IClTensorHandle*>(tensorHandle);
+            handle->Map(true);
+            switch(handle->GetDataType())
+            {
+                case arm_compute::DataType::F32:
+                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
+                    break;
+                case arm_compute::DataType::QASYMM8:
+                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
+                    break;
+                case arm_compute::DataType::F16:
+                    CopyArmComputeITensorData(static_cast<const armnn::Half*>(mem), handle->GetTensor());
+                    break;
+                default:
+                {
+                    throw armnn::UnimplementedException();
+                }
+            }
+            handle->Unmap();
+            break;
+        }
+#endif
+#if ARMCOMPUTENEON_ENABLED
+        case armnn::ITensorHandle::Neon:
+        {
+            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
+            auto handle = boost::polymorphic_downcast<armnn::INeonTensorHandle*>(tensorHandle);
+            switch (handle->GetDataType())
+            {
+                case arm_compute::DataType::F32:
+                    CopyArmComputeITensorData(static_cast<const float*>(mem), handle->GetTensor());
+                    break;
+                case arm_compute::DataType::QASYMM8:
+                    CopyArmComputeITensorData(static_cast<const uint8_t*>(mem), handle->GetTensor());
+                    break;
+                default:
+                {
+                    throw armnn::UnimplementedException();
+                }
+            }
+            break;
+        }
+#endif
+        default:
+        {
+            throw armnn::UnimplementedException();
+        }
+    }
+}
+
+void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle)
+{
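+    // Mirror of CopyDataToITensorHandle: reads the tensor contents back into host memory,
+    // again dispatching on the concrete tensor handle type.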
+    switch (tensorHandle->GetType())
+    {
+        case armnn::ITensorHandle::Cpu:
+        {
+            auto handle = boost::polymorphic_downcast<const armnn::ScopedCpuTensorHandle*>(tensorHandle);
+            memcpy(mem, handle->GetTensor<void>(), handle->GetTensorInfo().GetNumBytes());
+            break;
+        }
+#ifdef ARMCOMPUTECL_ENABLED
+        case armnn::ITensorHandle::CL:
+        {
+            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
+            auto handle = boost::polymorphic_downcast<const armnn::IClTensorHandle*>(tensorHandle);
+            const_cast<armnn::IClTensorHandle*>(handle)->Map(true);
+            switch(handle->GetDataType())
+            {
+                case arm_compute::DataType::F32:
+                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
+                    break;
+                case arm_compute::DataType::QASYMM8:
+                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
+                    break;
+                case arm_compute::DataType::F16:
+                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<armnn::Half*>(mem));
+                    break;
+                default:
+                {
+                    throw armnn::UnimplementedException();
+                }
+            }
+            const_cast<armnn::IClTensorHandle*>(handle)->Unmap();
+            break;
+        }
+#endif
+#if ARMCOMPUTENEON_ENABLED
+        case armnn::ITensorHandle::Neon:
+        {
+            using armnn::armcomputetensorutils::CopyArmComputeITensorData;
+            auto handle = boost::polymorphic_downcast<const armnn::INeonTensorHandle*>(tensorHandle);
+            switch (handle->GetDataType())
+            {
+                case arm_compute::DataType::F32:
+                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<float*>(mem));
+                    break;
+                case arm_compute::DataType::QASYMM8:
+                    CopyArmComputeITensorData(handle->GetTensor(), static_cast<uint8_t*>(mem));
+                    break;
+                default:
+                {
+                    throw armnn::UnimplementedException();
+                }
+            }
+            break;
+        }
+#endif
+        default:
+        {
+            throw armnn::UnimplementedException();
+        }
+    }
+}
+
+void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem)
+{
+    tensorHandle->Allocate();
+    CopyDataToITensorHandle(tensorHandle, mem);
+}
diff --git a/src/backends/backendsCommon/test/TensorCopyUtils.hpp b/src/backends/backendsCommon/test/TensorCopyUtils.hpp
new file mode 100644
index 0000000..2187523
--- /dev/null
+++ b/src/backends/backendsCommon/test/TensorCopyUtils.hpp
@@ -0,0 +1,15 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Tensor.hpp>
+
+#include <backendsCommon/ITensorHandle.hpp>
+
+void CopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem);
+
+void CopyDataFromITensorHandle(void* mem, const armnn::ITensorHandle* tensorHandle);
+
+void AllocateAndCopyDataToITensorHandle(armnn::ITensorHandle* tensorHandle, const void* mem);
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
new file mode 100644
index 0000000..3664d56
--- /dev/null
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -0,0 +1,474 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "WorkloadTestUtils.hpp"
+
+#include <armnn/Exceptions.hpp>
+
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <backendsCommon/Workload.hpp>
+
+#include <reference/workloads/RefWorkloads.hpp>
+#include <reference/RefWorkloadFactory.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+using namespace armnn;
+
+BOOST_AUTO_TEST_SUITE(WorkloadInfoValidation)
+
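+// Each test below builds a deliberately malformed queue descriptor / WorkloadInfo pair and checks that
+// constructing the reference workload (or calling Validate()) throws armnn::InvalidArgumentException.
+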
+BOOST_AUTO_TEST_CASE(QueueDescriptor_Validate_WrongNumOfInputsOutputs)
+{
+    InputQueueDescriptor invalidData;
+    WorkloadInfo invalidInfo;
+    //Invalid argument exception is expected, because no inputs and no outputs were defined.
+    BOOST_CHECK_THROW(RefWorkloadFactory().CreateInput(invalidData, invalidInfo), armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_CASE(RefPooling2dFloat32Workload_Validate_WrongDimTensor)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[]  = {2, 3, 4}; // <- Invalid - input tensor has to be 4D.
+    unsigned int outputShape[] = {2, 3, 4, 5};
+
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+    inputTensorInfo  = armnn::TensorInfo(3, inputShape, armnn::DataType::Float32);
+
+    Pooling2dQueueDescriptor invalidData;
+    WorkloadInfo           invalidInfo;
+
+    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
+
+    // Invalid argument exception is expected, input tensor has to be 4D.
+    BOOST_CHECK_THROW(RefPooling2dFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_CASE(SoftmaxQueueDescriptor_Validate_WrongInputHeight)
+{
+    unsigned int inputHeight = 1;
+    unsigned int inputWidth = 1;
+    unsigned int inputChannels = 4;
+    unsigned int inputNum = 2;
+
+    unsigned int outputChannels = inputChannels;
+    unsigned int outputHeight = inputHeight + 1;    //Makes data invalid - Softmax expects height and width to be 1.
+    unsigned int outputWidth = inputWidth;
+    unsigned int outputNum = inputNum;
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
+    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    SoftmaxQueueDescriptor invalidData;
+    WorkloadInfo           invalidInfo;
+
+    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
+    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+
+    //Invalid argument exception is expected, because height != 1.
+    BOOST_CHECK_THROW(RefSoftmaxFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_CASE(FullyConnectedQueueDescriptor_Validate_RequiredDataMissing)
+{
+    unsigned int inputWidth = 1;
+    unsigned int inputHeight = 1;
+    unsigned int inputChannels = 5;
+    unsigned int inputNum = 2;
+
+    unsigned int outputWidth = 1;
+    unsigned int outputHeight = 1;
+    unsigned int outputChannels = 3;
+    unsigned int outputNum = 2;
+
+    // Define the tensor descriptors.
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+    armnn::TensorInfo weightsDesc;
+    armnn::TensorInfo biasesDesc;
+
+    unsigned int inputShape[] = { inputNum, inputChannels, inputHeight, inputWidth };
+    unsigned int outputShape[] = { outputNum, outputChannels, outputHeight, outputWidth };
+    unsigned int weightsShape[] = { 1, 1, inputChannels, outputChannels };
+    unsigned int biasShape[] = { 1, outputChannels, outputHeight, outputWidth };
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+    weightsDesc = armnn::TensorInfo(4, weightsShape, armnn::DataType::Float32);
+    biasesDesc = armnn::TensorInfo(4, biasShape, armnn::DataType::Float32);
+
+    FullyConnectedQueueDescriptor invalidData;
+    WorkloadInfo                  invalidInfo;
+
+    ScopedCpuTensorHandle weightTensor(weightsDesc);
+    ScopedCpuTensorHandle biasTensor(biasesDesc);
+
+    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
+    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+    invalidData.m_Weight = &weightTensor;
+    invalidData.m_Bias = &biasTensor;
+    invalidData.m_Parameters.m_BiasEnabled = true;
+    invalidData.m_Parameters.m_TransposeWeightMatrix = false;
+
+
+    //Invalid argument exception is expected, because not all required fields have been provided.
+    //In particular inputsData[0], outputsData[0] and weightsData cannot be null.
+    BOOST_CHECK_THROW(RefFullyConnectedFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+}
+
+
+BOOST_AUTO_TEST_CASE(NormalizationQueueDescriptor_Validate_WrongInputHeight)
+{
+    constexpr unsigned int inputNum = 5;
+    constexpr unsigned int inputHeight   = 32;
+    constexpr unsigned int inputWidth    = 24;
+    constexpr unsigned int inputChannels = 3;
+
+    constexpr unsigned int outputNum = inputNum;
+    constexpr unsigned int outputChannels = inputChannels;
+    constexpr unsigned int outputHeight = inputHeight + 1; //Makes data invalid - normalization requires
+                                                           //input and output to have the same dimensions.
+    constexpr unsigned int outputWidth  = inputWidth;
+
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[]  = {inputNum, inputChannels, inputHeight, inputWidth};
+    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+
+    armnn::NormalizationAlgorithmMethod normMethod = armnn::NormalizationAlgorithmMethod::LocalBrightness;
+    armnn::NormalizationAlgorithmChannel normChannel = armnn::NormalizationAlgorithmChannel::Across;
+    float alpha = 1.f;
+    float beta = 1.f;
+    float kappa = 1.f;
+    uint32_t normSize = 5;
+
+    NormalizationQueueDescriptor invalidData;
+    WorkloadInfo                 invalidInfo;
+
+    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
+    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+    invalidData.m_Parameters.m_NormChannelType = normChannel;
+    invalidData.m_Parameters.m_NormMethodType  = normMethod;
+    invalidData.m_Parameters.m_NormSize        = normSize;
+    invalidData.m_Parameters.m_Alpha           = alpha;
+    invalidData.m_Parameters.m_Beta            = beta;
+    invalidData.m_Parameters.m_K               = kappa;
+
+    //Invalid argument exception is expected, because input height != output height.
+    BOOST_CHECK_THROW(RefNormalizationFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_CASE(SplitterQueueDescriptor_Validate_WrongWindow)
+{
+    constexpr unsigned int inputNum = 1;
+    constexpr unsigned int inputHeight   = 32;
+    constexpr unsigned int inputWidth    = 24;
+    constexpr unsigned int inputChannels = 3;
+
+    constexpr unsigned int outputNum = inputNum;
+    constexpr unsigned int outputChannels = inputChannels;
+    constexpr unsigned int outputHeight = 18;
+    constexpr unsigned int outputWidth  = inputWidth;
+
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[]  = {inputNum, inputChannels, inputHeight, inputWidth};
+    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    SplitterQueueDescriptor invalidData;
+    WorkloadInfo            invalidInfo;
+
+    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
+    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+
+    // Invalid, since it has only 3 dimensions while the input tensor is 4d.
+    std::vector<unsigned int> wOrigin = {0, 0, 0};
+    armnn::SplitterQueueDescriptor::ViewOrigin window(wOrigin);
+    invalidData.m_ViewOrigins.push_back(window);
+
+    BOOST_TEST_INFO("Invalid argument exception is expected, because split window dimensionality does not "
+        "match input.");
+    BOOST_CHECK_THROW(RefSplitterFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+
+    // Invalid, since window extends past the boundary of input tensor.
+    std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
+    armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);
+    invalidData.m_ViewOrigins[0] = window3;
+    BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2]+ outputHeight > inputHeight");
+    BOOST_CHECK_THROW(RefSplitterFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+
+
+    std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
+    armnn::SplitterQueueDescriptor::ViewOrigin window4(wOrigin4);
+    invalidData.m_ViewOrigins[0] = window4;
+
+    std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2};
+    armnn::SplitterQueueDescriptor::ViewOrigin window5(wOrigin5);
+    invalidData.m_ViewOrigins.push_back(window5);
+
+    BOOST_TEST_INFO("Invalid exception due to number of split windows not matching number of outputs.");
+    BOOST_CHECK_THROW(RefSplitterFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+}
+
+
+BOOST_AUTO_TEST_CASE(MergerQueueDescriptor_Validate_WrongWindow)
+{
+    constexpr unsigned int inputNum = 1;
+    constexpr unsigned int inputChannels = 3;
+    constexpr unsigned int inputHeight   = 32;
+    constexpr unsigned int inputWidth    = 24;
+
+    constexpr unsigned int outputNum = 1;
+    constexpr unsigned int outputChannels = 3;
+    constexpr unsigned int outputHeight = 32;
+    constexpr unsigned int outputWidth  = 24;
+
+
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[]  = {inputNum, inputChannels, inputHeight, inputWidth};
+    unsigned int outputShape[] = {outputNum, outputChannels, outputHeight, outputWidth};
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    MergerQueueDescriptor invalidData;
+    WorkloadInfo          invalidInfo;
+
+    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
+    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+
+    // Invalid, since it has only 3 dimensions while the input tensor is 4d.
+    std::vector<unsigned int> wOrigin = {0, 0, 0};
+    armnn::MergerQueueDescriptor::ViewOrigin window(wOrigin);
+    invalidData.m_ViewOrigins.push_back(window);
+
+    BOOST_TEST_INFO("Invalid argument exception is expected, because merge window dimensionality does not "
+        "match input.");
+    BOOST_CHECK_THROW(RefMergerFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+
+    // Invalid, since window extends past the boundary of output tensor.
+    std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
+    armnn::MergerQueueDescriptor::ViewOrigin window3(wOrigin3);
+    invalidData.m_ViewOrigins[0] = window3;
+    BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2]+ inputHeight > outputHeight");
+    BOOST_CHECK_THROW(RefMergerFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+
+
+    std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
+    armnn::MergerQueueDescriptor::ViewOrigin window4(wOrigin4);
+    invalidData.m_ViewOrigins[0] = window4;
+
+    std::vector<unsigned int> wOrigin5 = {1, 16, 20, 2};
+    armnn::MergerQueueDescriptor::ViewOrigin window5(wOrigin5);
+    invalidData.m_ViewOrigins.push_back(window5);
+
+    BOOST_TEST_INFO("Invalid exception due to number of merge windows not matching number of inputs.");
+    BOOST_CHECK_THROW(RefMergerFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_CASE(AdditionQueueDescriptor_Validate_InputNumbers)
+{
+    armnn::TensorInfo input1TensorInfo;
+    armnn::TensorInfo input2TensorInfo;
+    armnn::TensorInfo input3TensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int shape[]  = {1, 1, 1, 1};
+
+    input1TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    input2TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    input3TensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, shape, armnn::DataType::Float32);
+
+    AdditionQueueDescriptor invalidData;
+    WorkloadInfo            invalidInfo;
+
+    AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
+    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+
+    // Too few inputs.
+    BOOST_CHECK_THROW(RefAdditionFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+
+    AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
+
+    // Correct.
+    BOOST_CHECK_NO_THROW(RefAdditionFloat32Workload(invalidData, invalidInfo));
+
+    AddInputToWorkload(invalidData, invalidInfo, input3TensorInfo, nullptr);
+
+    // Too many inputs.
+    BOOST_CHECK_THROW(RefAdditionFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_CASE(AdditionQueueDescriptor_Validate_InputShapes)
+{
+    armnn::TensorInfo input1TensorInfo;
+    armnn::TensorInfo input2TensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int shape1[] = {1, 1, 2, 1};
+    unsigned int shape2[] = {1, 1, 3, 2};
+
+    // Incompatible shapes even with broadcasting.
+    {
+        input1TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
+        input2TensorInfo = armnn::TensorInfo(4, shape2, armnn::DataType::Float32);
+        outputTensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
+
+        AdditionQueueDescriptor invalidData;
+        WorkloadInfo            invalidInfo;
+
+        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
+        AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
+        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+
+        BOOST_CHECK_THROW(RefAdditionFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    }
+
+    // Output size not compatible with input sizes.
+    {
+        input1TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
+        input2TensorInfo = armnn::TensorInfo(4, shape1, armnn::DataType::Float32);
+        outputTensorInfo = armnn::TensorInfo(4, shape2, armnn::DataType::Float32);
+
+        AdditionQueueDescriptor invalidData;
+        WorkloadInfo            invalidInfo;
+
+        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
+        AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
+        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+
+        // Output differs.
+        BOOST_CHECK_THROW(RefAdditionFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    }
+}
+
+BOOST_AUTO_TEST_CASE(MultiplicationQueueDescriptor_Validate_InputTensorDimensionMismatch)
+{
+    armnn::TensorInfo input0TensorInfo;
+    armnn::TensorInfo input1TensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    constexpr unsigned int input0Shape[] = { 2, 2, 4, 4 };
+    constexpr std::size_t dimensionCount = std::extent<decltype(input0Shape)>::value;
+
+    // Checks dimension consistency for input tensors.
+    for (unsigned int dimIndex = 0; dimIndex < dimensionCount; ++dimIndex)
+    {
+        unsigned int input1Shape[dimensionCount];
+        for (unsigned int i = 0; i < dimensionCount; ++i)
+        {
+            input1Shape[i] = input0Shape[i];
+        }
+
+        ++input1Shape[dimIndex];
+
+        input0TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
+        input1TensorInfo = armnn::TensorInfo(dimensionCount, input1Shape, armnn::DataType::Float32);
+        outputTensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
+
+        MultiplicationQueueDescriptor invalidData;
+        WorkloadInfo                  invalidInfo;
+
+        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+        AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
+        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
+
+        BOOST_CHECK_THROW(RefMultiplicationFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    }
+
+    // Checks dimension consistency for input and output tensors.
+    for (unsigned int dimIndex = 0; dimIndex < dimensionCount; ++dimIndex)
+    {
+        unsigned int outputShape[dimensionCount];
+        for (unsigned int i = 0; i < dimensionCount; ++i)
+        {
+            outputShape[i] = input0Shape[i];
+        }
+
+        ++outputShape[dimIndex];
+
+        input0TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
+        input1TensorInfo = armnn::TensorInfo(dimensionCount, input0Shape, armnn::DataType::Float32);
+        outputTensorInfo = armnn::TensorInfo(dimensionCount, outputShape, armnn::DataType::Float32);
+
+        MultiplicationQueueDescriptor invalidData;
+        WorkloadInfo                  invalidInfo;
+
+        AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+        AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
+        AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
+
+        BOOST_CHECK_THROW(RefMultiplicationFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    }
+}
+
+BOOST_AUTO_TEST_CASE(ReshapeQueueDescriptor_Validate_MismatchingNumElements)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    // The input and output shapes should have the same number of elements, but these don't.
+    unsigned int inputShape[] = { 1, 1, 2, 3 };
+    unsigned int outputShape[] = { 1, 1, 1, 2 };
+
+    inputTensorInfo = armnn::TensorInfo(4, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(4, outputShape, armnn::DataType::Float32);
+
+    ReshapeQueueDescriptor invalidData;
+    WorkloadInfo           invalidInfo;
+
+    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
+    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+
+    // InvalidArgumentException is expected because the numbers of elements don't match.
+    BOOST_CHECK_THROW(RefReshapeFloat32Workload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+}
+
+
+BOOST_AUTO_TEST_CASE(LstmQueueDescriptor_Validate)
+{
+    armnn::TensorInfo inputTensorInfo;
+    armnn::TensorInfo outputTensorInfo;
+
+    unsigned int inputShape[] = { 1, 2 };
+    unsigned int outputShape[] = { 1 };
+
+    inputTensorInfo = armnn::TensorInfo(2, inputShape, armnn::DataType::Float32);
+    outputTensorInfo = armnn::TensorInfo(1, outputShape, armnn::DataType::Float32);
+
+    LstmQueueDescriptor invalidData;
+    WorkloadInfo        invalidInfo;
+
+    AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
+    AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
+
+    BOOST_CHECK_THROW(invalidData.Validate(invalidInfo), armnn::InvalidArgumentException);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
new file mode 100644
index 0000000..05f6dde
--- /dev/null
+++ b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
@@ -0,0 +1,56 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+#pragma once
+
+#include <armnn/Tensor.hpp>
+
+#include <backendsCommon/WorkloadInfo.hpp>
+
+namespace armnn
+{
+class ITensorHandle;
+}
+
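+// Helpers for building the QueueDescriptor / WorkloadInfo pairs used by the workload tests:
+// the Add* functions append a tensor handle and its TensorInfo, while the Set* functions
+// overwrite an existing slot at the given index.
+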
+template <typename QueueDescriptor>
+void AddInputToWorkload(QueueDescriptor& descriptor,
+    armnn::WorkloadInfo& info,
+    const armnn::TensorInfo& tensorInfo,
+    armnn::ITensorHandle* tensorHandle)
+{
+    descriptor.m_Inputs.push_back(tensorHandle);
+    info.m_InputTensorInfos.push_back(tensorInfo);
+}
+
+template <typename QueueDescriptor>
+void AddOutputToWorkload(QueueDescriptor& descriptor,
+    armnn::WorkloadInfo& info,
+    const armnn::TensorInfo& tensorInfo,
+    armnn::ITensorHandle* tensorHandle)
+{
+    descriptor.m_Outputs.push_back(tensorHandle);
+    info.m_OutputTensorInfos.push_back(tensorInfo);
+}
+
+template <typename QueueDescriptor>
+void SetWorkloadInput(QueueDescriptor& descriptor,
+    armnn::WorkloadInfo& info,
+    unsigned int index,
+    const armnn::TensorInfo& tensorInfo,
+    armnn::ITensorHandle* tensorHandle)
+{
+    descriptor.m_Inputs[index] = tensorHandle;
+    info.m_InputTensorInfos[index] = tensorInfo;
+}
+
+template <typename QueueDescriptor>
+void SetWorkloadOutput(QueueDescriptor& descriptor,
+    armnn::WorkloadInfo& info,
+    unsigned int index,
+    const armnn::TensorInfo& tensorInfo,
+    armnn::ITensorHandle* tensorHandle)
+{
+    descriptor.m_Outputs[index] = tensorHandle;
+    info.m_OutputTensorInfos[index] = tensorInfo;
+}
\ No newline at end of file