IVGCVSW-3217 Refactor the Layer tests for ResizeBilinear to make them generic

 * Refactored the ResizeBilinear tests so they can be used for both Float32 and Uint8.
 * Moved the implementations to the .hpp file and renamed the tests accordingly.
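 * Each test is now a function template parameterised on armnn::DataType,
   with explicit instantiations for Float32 and QuantisedAsymm8, so callers
   select the data type via the template argument. An illustrative call site
   (the workloadFactory/memoryManager names stand in for whatever the calling
   test fixture provides):

       ResizeBilinearNopTest<armnn::DataType::Float32>(
           workloadFactory, memoryManager, armnn::DataLayout::NCHW);
       ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>(
           workloadFactory, memoryManager, armnn::DataLayout::NHWC);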

Signed-off-by: Ellen Norris-Thompson <ellen.norris-thompson@arm.com>
Change-Id: Icf79b0616db0c307cfcf94747fe0a6d4343588bd
diff --git a/src/backends/backendsCommon/test/LayerTests.cpp b/src/backends/backendsCommon/test/LayerTests.cpp
index fb07f9f..af426a4 100644
--- a/src/backends/backendsCommon/test/LayerTests.cpp
+++ b/src/backends/backendsCommon/test/LayerTests.cpp
@@ -5224,342 +5224,6 @@
         workloadFactory, memoryManager, 0.0f, 0, useSubtensor);
 }
 
-LayerTestResult<float, 4> ResizeBilinearNopTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
-
-    const armnn::TensorInfo outputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
-
-    std::vector<float> inputData({
-        1.0f, 2.0f, 3.0f, 4.0f,
-        2.0f, 3.0f, 4.0f, 5.0f,
-        3.0f, 4.0f, 5.0f, 6.0f,
-        4.0f, 5.0f, 6.0f, 7.0f,
-
-        1.0f, 2.0f, 3.0f, 4.0f,
-        2.0f, 3.0f, 4.0f, 5.0f,
-        3.0f, 4.0f, 5.0f, 6.0f,
-        4.0f, 5.0f, 6.0f, 7.0f
-    });
-
-    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout == armnn::DataLayout::NHWC)
-    {
-        std::vector<float> tmp(inputData.size());
-        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
-        inputData = tmp;
-    }
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
-    LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = input;
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_DataLayout = dataLayout;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<float, 4> SimpleResizeBilinearTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
-
-    const armnn::TensorInfo outputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, armnn::DataType::Float32);
-
-    std::vector<float> inputData({
-          1.0f, 255.0f,
-        200.0f, 250.0f,
-
-        250.0f, 200.0f,
-        250.0f,   1.0f
-    });
-
-    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
-    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
-    // output texel. Thus, for a input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
-    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
-    // which we would expect if projecting the centre).
-
-    std::vector<float> outputData({
-          1.0f,
-
-        250.0f
-    });
-
-    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout == armnn::DataLayout::NHWC)
-    {
-        std::vector<float> tmp(inputData.size());
-        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
-        inputData = tmp;
-
-        std::vector<float> tmp1(outputData.size());
-        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
-        outputData = tmp1;
-    }
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
-    LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_DataLayout = dataLayout;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<float, 4> ResizeBilinearSqMinTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, armnn::DataType::Float32);
-
-    const armnn::TensorInfo outputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, armnn::DataType::Float32);
-
-    std::vector<float> inputData({
-        1.0f, 2.0f, 3.0f, 4.0f,
-        2.0f, 3.0f, 4.0f, 5.0f,
-        3.0f, 4.0f, 5.0f, 6.0f,
-        4.0f, 5.0f, 6.0f, 7.0f,
-
-        7.0f, 6.0f, 5.0f, 4.0f,
-        6.0f, 5.0f, 4.0f, 3.0f,
-        5.0f, 4.0f, 3.0f, 2.0f,
-        4.0f, 3.0f, 2.0f, 1.0f
-    });
-
-    std::vector<float> outputData({
-        1.0f, 3.0f,
-        3.0f, 5.0f,
-
-        7.0f, 5.0f,
-        5.0f, 3.0f
-    });
-
-    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout == armnn::DataLayout::NHWC)
-    {
-        std::vector<float> tmp(inputData.size());
-        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
-        inputData = tmp;
-
-        std::vector<float> tmp1(outputData.size());
-        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
-        outputData = tmp1;
-    }
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
-    LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_DataLayout = dataLayout;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<float, 4> ResizeBilinearMinTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
-
-    const armnn::TensorInfo outputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, armnn::DataType::Float32);
-
-    std::vector<float> inputData({
-          1.0f,   2.0f,   3.0f,   5.0f,   8.0f,
-         13.0f,  21.0f,  34.0f,  55.0f,  89.0f,
-        144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
-
-        987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
-         89.0f,  55.0f,  34.0f,  21.0f,  13.0f,
-          8.0f,   5.0f,   3.0f,   2.0f,   1.0f
-    });
-
-    std::vector<float> outputData({
-          1.0f,   2.6666f,   6.00f,
-         78.5f, 179.3333f, 401.00f,
-
-        987.0f, 454.6670f, 203.33f,
-         48.5f,  22.3333f,  10.00f
-    });
-
-    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout == armnn::DataLayout::NHWC)
-    {
-        std::vector<float> tmp(inputData.size());
-        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
-        inputData = tmp;
-
-        std::vector<float> tmp1(outputData.size());
-        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
-        outputData = tmp1;
-    }
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
-    LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_DataLayout = dataLayout;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<float, 4> ResizeBilinearMagTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout)
-{
-    const armnn::TensorInfo inputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, armnn::DataType::Float32);
-
-    const armnn::TensorInfo outputTensorInfo =
-        armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, armnn::DataType::Float32);
-
-    std::vector<float> inputData({
-          1.0f,   2.0f,
-         13.0f,  21.0f,
-        144.0f, 233.0f,
-
-        233.0f, 144.0f,
-         21.0f,  13.0f,
-          2.0f,   1.0f
-    });
-
-    std::vector<float> outputData({
-          1.0f,   1.4f,   1.8f,   2.0f,   2.0f,
-         13.0f,  16.2f,  19.4f,  21.0f,  21.0f,
-        144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
-
-        233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
-         21.0f,  17.8f,  14.6f,  13.0f,  13.0f,
-          2.0f,   1.6f,   1.2f,   1.0f,   1.0f
-    });
-
-    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
-    if (dataLayout == armnn::DataLayout::NHWC)
-    {
-        std::vector<float> tmp(inputData.size());
-        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
-        inputData = tmp;
-
-        std::vector<float> tmp1(outputData.size());
-        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
-        outputData = tmp1;
-    }
-
-    auto input = MakeTensor<float, 4>(inputTensorInfo, inputData);
-
-    LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, outputData);
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    descriptor.m_Parameters.m_DataLayout = dataLayout;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
 LayerTestResult<float, 2> FakeQuantizationTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
@@ -7634,293 +7298,6 @@
                                                                    shape0, output, 1.0f, 0);
 }
 
-LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    constexpr unsigned int inputWidth = 4;
-    constexpr unsigned int inputHeight = 4;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
-
-    constexpr unsigned int outputWidth = inputWidth;
-    constexpr unsigned int outputHeight = inputHeight;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(1.5f);
-    inputTensorInfo.SetQuantizationOffset(-3);
-
-    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(1.5f);
-    outputTensorInfo.SetQuantizationOffset(-3);
-
-    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
-        1, 2, 3, 4,
-        2, 3, 4, 5,
-        3, 4, 5, 6,
-        4, 5, 6, 7
-    }));
-
-    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
-    result.outputExpected = input;
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    constexpr unsigned int inputWidth = 2;
-    constexpr unsigned int inputHeight = 2;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
-
-    constexpr unsigned int outputWidth = inputWidth / 2;
-    constexpr unsigned int outputHeight = inputHeight / 2;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(0.1567f);
-    inputTensorInfo.SetQuantizationOffset(1);
-
-    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(0.1567f);
-    outputTensorInfo.SetQuantizationOffset(1);
-
-    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
-        1, 255,
-        200, 250
-    }));
-
-    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
-    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
-    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
-    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
-    // the centre).
-    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
-        1
-    }));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    constexpr unsigned int inputWidth = 4;
-    constexpr unsigned int inputHeight = 4;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
-
-    constexpr unsigned int outputWidth = inputWidth / 2;
-    constexpr unsigned int outputHeight = inputHeight / 2;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(3.141592f);
-    inputTensorInfo.SetQuantizationOffset(3);
-
-    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(3.141592f);
-    outputTensorInfo.SetQuantizationOffset(3);
-
-    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
-        1, 2, 3, 4,
-        2, 3, 4, 5,
-        3, 4, 5, 6,
-        4, 5, 6, 7
-    }));
-
-    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
-        1, 3,
-        3, 5
-    }));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    constexpr unsigned int inputWidth = 3;
-    constexpr unsigned int inputHeight = 2;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
-
-    constexpr unsigned int outputWidth = 2;
-    constexpr unsigned int outputHeight = 1;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(1.5f);
-    inputTensorInfo.SetQuantizationOffset(-1);
-
-    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(1.5f);
-    outputTensorInfo.SetQuantizationOffset(-1);
-
-    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
-        1,  2,  3, // 3.0, 4.5, 6.0
-        5,  8, 13  // 9.0, 13.5, 21.0
-    }));
-
-    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
-        1, 3 // 3.0, 5.25
-    }));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
-LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
-{
-    constexpr unsigned int inputWidth = 2;
-    constexpr unsigned int inputHeight = 3;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
-
-    constexpr unsigned int outputWidth = 5;
-    constexpr unsigned int outputHeight = 3;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::QuantisedAsymm8);
-    inputTensorInfo.SetQuantizationScale(0.010765f);
-    inputTensorInfo.SetQuantizationOffset(7);
-
-    armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::QuantisedAsymm8);
-    outputTensorInfo.SetQuantizationScale(0.010132f);
-    outputTensorInfo.SetQuantizationOffset(-18);
-
-    auto input = MakeTensor<uint8_t, 4>(inputTensorInfo, std::vector<uint8_t>({
-         24, 228, // 0.183005, 2.379065,
-        105, 128, // 1.05497, 1.302565
-        230,  71  // 2.400595, 0.68896
-    }));
-
-    LayerTestResult<uint8_t, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<uint8_t, 4>(outputTensorInfo, std::vector<uint8_t>({
-          0,  87, 173, 217, 217, // 0.18300501, 1.06142902, 1.93985295, 2.37906504, 2.37906504
-         86,  96, 106, 111, 111, // 1.05497003, 1.15400803, 1.25304604, 1.30256498, 1.30256498
-        219, 151,  84,  50,  50  // 2.40059495, 1.71594095, 1.03128707, 0.68896002, 0.68896002
-    }));
-
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workload->PostAllocationConfigure();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
-}
-
 LayerTestResult<float, 4> BatchNormTest(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index 1910585..8bbd0d4 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -10,6 +10,8 @@
 #include <Half.hpp>
 #include "TensorCopyUtils.hpp"
 #include "WorkloadTestUtils.hpp"
+#include "TensorUtils.hpp"
+#include "Permute.hpp"
 
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <backendsCommon/IBackendInternal.hpp>
@@ -843,36 +845,41 @@
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 
-// Tests that the output should be identical to the input when the output dimensions match the input ones.
-LayerTestResult<float, 4> ResizeBilinearNopTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout);
+/// Tests that the output is identical to the input when the output dimensions match the input ones.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeBilinearNopTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
 
 // Tests the behaviour of the resize bilinear operation when rescaling a 2x2 image into a 1x1 image.
-LayerTestResult<float, 4> SimpleResizeBilinearTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout);
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleResizeBilinearTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
 
 // Tests the resize bilinear for minification of a square input matrix (also: input dimensions are a
 // multiple of output dimensions).
-LayerTestResult<float, 4> ResizeBilinearSqMinTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout);
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeBilinearSqMinTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
 
 // Tests the resize bilinear for minification (output dimensions smaller than input dimensions).
-LayerTestResult<float, 4> ResizeBilinearMinTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout dataLayout);
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeBilinearMinTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
 
 // Tests the resize bilinear for magnification (output dimensions bigger than input dimensions).
-LayerTestResult<float, 4> ResizeBilinearMagTest(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
-    const armnn::DataLayout  dataLayout);
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeBilinearMagTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Rsqrt2dTestCommon(
@@ -1162,26 +1169,6 @@
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
-LayerTestResult<uint8_t, 4> ResizeBilinearNopUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> SimpleResizeBilinearUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> ResizeBilinearSqMinUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> ResizeBilinearMinUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
-LayerTestResult<uint8_t, 4> ResizeBilinearMagUint8Test(
-    armnn::IWorkloadFactory& workloadFactory,
-    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
-
 LayerTestResult<uint8_t, 4> BatchNormUint8Test(
     armnn::IWorkloadFactory& workloadFactory,
     const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
@@ -2328,3 +2315,540 @@
 
     return ret;
 }
+
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeBilinearNopTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout)
+{
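+    // The quantised variant runs on a single channel (matching the original
+    // Uint8 test's 1x1x4x4 tensor), while the Float32 variant keeps the
+    // original two-channel 1x2x4x4 shape.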
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+                                        ?  armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
+                                        :  armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+                                         ?  armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
+                                         :  armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.5f);
+        inputTensorInfo.SetQuantizationOffset(-3);
+        outputTensorInfo.SetQuantizationScale(1.5f);
+        outputTensorInfo.SetQuantizationOffset(-3);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+                                   ? std::initializer_list<float>
+                                     {
+                                             1, 2, 3, 4,
+                                             2, 3, 4, 5,
+                                             3, 4, 5, 6,
+                                             4, 5, 6, 7
+                                     }
+                                   : std::initializer_list<float>
+                                     {
+                                             1.0f, 2.0f, 3.0f, 4.0f,
+                                             2.0f, 3.0f, 4.0f, 5.0f,
+                                             3.0f, 4.0f, 5.0f, 6.0f,
+                                             4.0f, 5.0f, 6.0f, 7.0f,
+
+                                             1.0f, 2.0f, 3.0f, 4.0f,
+                                             2.0f, 3.0f, 4.0f, 5.0f,
+                                             3.0f, 4.0f, 5.0f, 6.0f,
+                                             4.0f, 5.0f, 6.0f, 7.0f
+                                     };
+
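+    // The reference data above is laid out in NCHW order; when the test runs
+    // in NHWC, permute it to match the shape returned by GetTensorInfo.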
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+    }
+
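+    // QuantizedVector quantises the reference floats using the tensor's scale
+    // and offset for quantised types, and passes them through unchanged for
+    // Float32.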
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = input;
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
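+// Explicit instantiations for the data types exercised by the backend unit
+// tests (Float32 and QuantisedAsymm8).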
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeBilinearNopTest<armnn::DataType::Float32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> SimpleResizeBilinearTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+                                        ?  armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
+                                        :  armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+                                         ?  armnnUtils::GetTensorInfo(1, 1, 1, 1, dataLayout, ArmnnType)
+                                         :  armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(0.1567f);
+        inputTensorInfo.SetQuantizationOffset(1);
+        outputTensorInfo.SetQuantizationScale(0.1567f);
+        outputTensorInfo.SetQuantizationOffset(1);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+                                   ? std::initializer_list<float>
+                                     {
+                                             1, 255,
+                                             200, 250
+                                     }
+                                   : std::initializer_list<float>
+                                     {
+                                             1.0f, 255.0f,
+                                             200.0f, 250.0f,
+
+                                             250.0f, 200.0f,
+                                             250.0f,   1.0f
+                                     };
+
+    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
+    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
+    // output texel. Thus, for an input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
+    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
+    // which we would expect if projecting the centre).
+
+    std::vector<float> outputData = armnn::IsQuantizedType<T>()
+                                    ? std::initializer_list<float>
+                                      {
+                                              1
+                                      }
+                                    : std::initializer_list<float>
+                                      {
+                                              1.0f,
+
+                                              250.0f
+                                      };
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                                                                outputTensorInfo.GetQuantizationOffset(),
+                                                                outputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::Float32>, 4>
+SimpleResizeBilinearTest<armnn::DataType::Float32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeBilinearSqMinTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+                                        ?  armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
+                                        :  armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+                                         ?  armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
+                                         :  armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(3.141592f);
+        inputTensorInfo.SetQuantizationOffset(3);
+        outputTensorInfo.SetQuantizationScale(3.141592f);
+        outputTensorInfo.SetQuantizationOffset(3);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+                                   ? std::initializer_list<float>
+                                     {
+                                             1, 2, 3, 4,
+                                             2, 3, 4, 5,
+                                             3, 4, 5, 6,
+                                             4, 5, 6, 7
+                                     }
+                                   : std::initializer_list<float>
+                                     {
+                                             1.0f, 2.0f, 3.0f, 4.0f,
+                                             2.0f, 3.0f, 4.0f, 5.0f,
+                                             3.0f, 4.0f, 5.0f, 6.0f,
+                                             4.0f, 5.0f, 6.0f, 7.0f,
+
+                                             7.0f, 6.0f, 5.0f, 4.0f,
+                                             6.0f, 5.0f, 4.0f, 3.0f,
+                                             5.0f, 4.0f, 3.0f, 2.0f,
+                                             4.0f, 3.0f, 2.0f, 1.0f
+                                     };
+
+    std::vector<float> outputData = armnn::IsQuantizedType<T>()
+                                    ? std::initializer_list<float>
+                                      {
+                                              1, 3,
+                                              3, 5
+                                      }
+                                    : std::initializer_list<float>
+                                      {
+                                              1.0f, 3.0f,
+                                              3.0f, 5.0f,
+
+                                              7.0f, 5.0f,
+                                              5.0f, 3.0f
+                                      };
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                                                                outputTensorInfo.GetQuantizationOffset(),
+                                                                outputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::Float32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeBilinearMinTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+                                        ?  armnnUtils::GetTensorInfo(1, 1, 2, 3, dataLayout, ArmnnType)
+                                        :  armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+                                         ?  armnnUtils::GetTensorInfo(1, 1, 1, 2, dataLayout, ArmnnType)
+                                         :  armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.5f);
+        inputTensorInfo.SetQuantizationOffset(-1);
+        outputTensorInfo.SetQuantizationScale(1.5f);
+        outputTensorInfo.SetQuantizationOffset(-1);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+                                   ? std::initializer_list<float>
+                                     {
+                                             3.0f, 4.5f, 6.0f, // 1,  2,  3, : Expected quantised values
+                                             9.0f, 13.5f, 21.0f // 5,  8, 13
+                                     }
+                                   : std::initializer_list<float>
+                                     {
+                                             1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
+                                             13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
+                                             144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
+
+                                             987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
+                                             89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
+                                             8.0f, 5.0f, 3.0f, 2.0f, 1.0f
+                                     };
+
+    std::vector<float> outputData = armnn::IsQuantizedType<T>()
+                                    ? std::initializer_list<float>
+                                      {
+                                              3.0f, 5.25f // 1, 3
+                                      }
+                                    : std::initializer_list<float>
+                                      {
+                                              1.0f,   2.6666f,   6.00f,
+                                              78.5f, 179.3333f, 401.00f,
+
+                                              987.0f, 454.6670f, 203.33f,
+                                              48.5f,  22.3333f,  10.00f
+                                      };
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                                                                outputTensorInfo.GetQuantizationOffset(),
+                                                                outputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeBilinearMinTest<armnn::DataType::Float32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeBilinearMagTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+                                        ?  armnnUtils::GetTensorInfo(1, 1, 3, 2, dataLayout, ArmnnType)
+                                        :  armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+                                         ?  armnnUtils::GetTensorInfo(1, 1, 3, 5, dataLayout, ArmnnType)
+                                         :  armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(0.010765f);
+        inputTensorInfo.SetQuantizationOffset(7);
+        outputTensorInfo.SetQuantizationScale(0.010132f);
+        outputTensorInfo.SetQuantizationOffset(-18);
+    }
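+    // Unlike the other resize tests, input and output deliberately use
+    // different quantisation parameters here, so the expected outputs are
+    // given as dequantised floats (quantised equivalents noted inline).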
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+                                   ? std::initializer_list<float>
+                                     {
+                                             0.183005f, 2.379065f, // 24, 228, : Expected quantised values
+                                             1.05497f, 1.302565f, // 105, 128,
+                                             2.400595f, 0.68896f // 230, 71
+                                     }
+                                   : std::initializer_list<float>
+                                     {
+                                             1.0f,   2.0f,
+                                             13.0f,  21.0f,
+                                             144.0f, 233.0f,
+
+                                             233.0f, 144.0f,
+                                             21.0f,  13.0f,
+                                             2.0f,   1.0f
+                                     };
+
+    std::vector<float> outputData = armnn::IsQuantizedType<T>()
+                                    ? std::initializer_list<float>
+                                      {
+                                              0.18300501f, 1.06142902f, 1.93985295f, 2.37906504f, 2.37906504f,
+                                              1.05497003f, 1.15400803f, 1.25304604f, 1.30256498f, 1.30256498f,
+                                              2.40059495f, 1.71594095f, 1.03128707f, 0.68896002f, 0.68896002f
+                                              // 0, 87, 173, 217, 217, : Expected quantised values
+                                              // 86, 96, 106, 111, 111,
+                                              // 219, 151, 84, 50, 50
+                                      }
+                                    : std::initializer_list<float>
+                                      {
+                                              1.0f,   1.4f,   1.8f,   2.0f,   2.0f,
+                                              13.0f,  16.2f,  19.4f,  21.0f,  21.0f,
+                                              144.0f, 179.6f, 215.2f, 233.0f, 233.0f,
+
+                                              233.0f, 197.4f, 161.8f, 144.0f, 144.0f,
+                                              21.0f,  17.8f,  14.6f,  13.0f,  13.0f,
+                                              2.0f,   1.6f,   1.2f,   1.0f,   1.0f
+                                      };
+
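+    // The reference data above is laid out in NCHW; permute both the input and the
+    // expected output when the test runs with the NHWC data layout.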
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                                                                outputTensorInfo.GetQuantizationOffset(),
+                                                                outputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
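+    // Create, configure and run the ResizeBilinear workload, then read back the result.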
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::Float32>, 4>
+ResizeBilinearMagTest<armnn::DataType::Float32>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+template LayerTestResult<typename armnn::ResolveType<armnn::DataType::QuantisedAsymm8>, 4>
+ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index b92e88e..fee980c 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -245,18 +245,28 @@
 ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dTest, armnn::DataLayout::NHWC)
 
 // Resize Bilinear - NCHW data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
 
 // Resize Bilinear - NHWC data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
+                     ResizeBilinearNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
+                     SimpleResizeBilinearTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
+                     ResizeBilinearSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
+                     ResizeBilinearMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
+                     ResizeBilinearMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
 
 // Constant
 ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index f842892..4e719d2 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -471,24 +471,60 @@
 ARMNN_AUTO_TEST_CASE(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
 
 // Resize Bilinear - NCHW data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest<armnn::DataType::Float32>, armnn::DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopUint8Test)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
+                     SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
+                     ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
+                     ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
+                     ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
+                     ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
 
 // Resize Bilinear - NHWC data layout
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
+                     ResizeBilinearNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
+                     SimpleResizeBilinearTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
+                     ResizeBilinearSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
+                     ResizeBilinearMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
+                     ResizeBilinearMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
+                     ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
+                     SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
+                     ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
+                     ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
+                     ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
 
 // Quantize
 ARMNN_AUTO_TEST_CASE(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index efdd1ef..fd01550 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -401,23 +401,68 @@
 ARMNN_AUTO_TEST_CASE(BatchNormInt16Nhwc, BatchNormInt16NhwcTest)
 
 // Resize Bilinear - NCHW
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8, SimpleResizeBilinearUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8, ResizeBilinearNopUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8, ResizeBilinearSqMinUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8, ResizeBilinearMinUint8Test)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest, armnn::DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8, ResizeBilinearMagUint8Test)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear,
+                     SimpleResizeBilinearTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8,
+                     SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNop,
+                     ResizeBilinearNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8,
+                     ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin,
+                     ResizeBilinearSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8,
+                     ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMin,
+                     ResizeBilinearMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8,
+                     ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMag,
+                     ResizeBilinearMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8,
+                     ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
 
 // Resize Bilinear - NHWC
-ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinTest, armnn::DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagTest, armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc,
+                     ResizeBilinearNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopUint8Nhwc,
+                     ResizeBilinearNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc,
+                     SimpleResizeBilinearTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearUint8Nhwc,
+                     SimpleResizeBilinearTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc,
+                     ResizeBilinearSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinUint8Nhwc,
+                     ResizeBilinearSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc,
+                     ResizeBilinearMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinUint8Nhwc,
+                     ResizeBilinearMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc,
+                     ResizeBilinearMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagUint8Nhwc,
+                     ResizeBilinearMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
 
 // Fake Quantization
 ARMNN_AUTO_TEST_CASE(FakeQuantization, FakeQuantizationTest)