IVGCVSW-1932: Add Unit tests for NHWC ResizeBilinear

* Adds five unit tests that execute ResizeBilinear
  with the NHWC data layout and Float32 data type.
* Refactors the original ResizeBilinear Float32 tests
  to take DataLayout as a parameter (the layout/index
  mapping they rely on is sketched below).
* Adds four unit tests to CreateWorkloadCl that create
  ResizeBilinear workloads for both NCHW and NHWC
  (CreateWorkloadCl previously had no ResizeBilinear
  tests, even for NCHW).
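
For reference, the layout-dependent height/width indexing that the
refactored tests key off can be sketched as follows. This is
illustrative only and not part of the change; the hypothetical
GetHeightWidthIndices helper and the stand-in DataLayout enum merely
mirror the switch statements added below:

    #include <cassert>
    #include <utility>

    enum class DataLayout { NCHW, NHWC }; // stand-in for armnn::DataLayout

    // Returns { heightIndex, widthIndex } for a 4D tensor shape.
    std::pair<unsigned int, unsigned int> GetHeightWidthIndices(DataLayout layout)
    {
        if (layout == DataLayout::NHWC)
        {
            return { 1, 2 }; // { N, H, W, C }
        }
        return { 2, 3 };     // { N, C, H, W }
    }

    int main()
    {
        assert(GetHeightWidthIndices(DataLayout::NCHW) == std::make_pair(2u, 3u));
        assert(GetHeightWidthIndices(DataLayout::NHWC) == std::make_pair(1u, 2u));
    }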

Change-Id: I1af419ed0b62b8f4d4550f6d120a584a0a223b17
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 66f6282..f2c8b5a 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -807,13 +807,35 @@
 
 template <typename ResizeBilinearWorkload, armnn::DataType DataType>
 std::unique_ptr<ResizeBilinearWorkload> CreateResizeBilinearWorkloadTest(armnn::IWorkloadFactory& factory,
-    armnn::Graph& graph)
+                                                                         armnn::Graph& graph,
+                                                                         DataLayout dataLayout = DataLayout::NCHW)
 {
+    TensorShape inputShape;
+    TensorShape outputShape;
+    unsigned int heightIndex;
+    unsigned int widthIndex;
+
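+    // Shape and height/width indices depend on the data layout: NCHW is { N, C, H, W }, NHWC is { N, H, W, C }.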
+    switch (dataLayout)
+    {
+        case DataLayout::NHWC:
+            inputShape = { 2, 4, 4, 3 };
+            outputShape = { 2, 2, 2, 3 };
+            heightIndex = 1;
+            widthIndex = 2;
+            break;
+        default: // NCHW
+            inputShape = { 2, 3, 4, 4 };
+            outputShape = { 2, 3, 2, 2 };
+            heightIndex = 2;
+            widthIndex = 3;
+    }
+
     // Creates the layer we're testing.
-    TensorShape outputShape({ 2, 3, 2, 2 });
     ResizeBilinearDescriptor resizeDesc;
-    resizeDesc.m_TargetWidth = outputShape[3];
-    resizeDesc.m_TargetHeight = outputShape[2];
+    resizeDesc.m_TargetWidth = outputShape[widthIndex];
+    resizeDesc.m_TargetHeight = outputShape[heightIndex];
+    resizeDesc.m_DataLayout = dataLayout;
     Layer* const layer = graph.AddLayer<ResizeBilinearLayer>(resizeDesc, "layer");
 
     // Creates extra layers.
@@ -821,7 +841,7 @@
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
 
     // Connects up.
-    armnn::TensorInfo inputTensorInfo({ 2, 3, 4, 4 }, DataType);
+    armnn::TensorInfo inputTensorInfo(inputShape, DataType);
     armnn::TensorInfo outputTensorInfo(outputShape, DataType);
     Connect(input, layer, inputTensorInfo);
     Connect(layer, output, outputTensorInfo);
@@ -833,6 +853,7 @@
     ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
     BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
     BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
diff --git a/src/backends/test/ArmComputeCl.cpp b/src/backends/test/ArmComputeCl.cpp
index d432a26..faa3d4f 100644
--- a/src/backends/test/ArmComputeCl.cpp
+++ b/src/backends/test/ArmComputeCl.cpp
@@ -187,13 +187,20 @@
 ARMNN_AUTO_TEST_CASE(L2Normalization3dNhwc, L2Normalization3dNhwcTest)
 ARMNN_AUTO_TEST_CASE(L2Normalization4dNhwc, L2Normalization4dNhwcTest)
 
-// Resize Bilinear
-ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest)
+// Resize Bilinear - NCHW data layout
 ARMNN_AUTO_TEST_CASE(ResizeBilinearNop, ResizeBilinearNopTest)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinear, SimpleResizeBilinearTest)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMin, ResizeBilinearSqMinTest)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMin, ResizeBilinearMinTest)
 ARMNN_AUTO_TEST_CASE(ResizeBilinearMag, ResizeBilinearMagTest)
 
+// Resize Bilinear - NHWC data layout
+ARMNN_AUTO_TEST_CASE(ResizeBilinearNopNhwc, ResizeBilinearNopNhwcTest)
+ARMNN_AUTO_TEST_CASE(SimpleResizeBilinearNhwc, SimpleResizeBilinearNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearSqMinNhwc, ResizeBilinearSqMinNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMinNhwc, ResizeBilinearMinNhwcTest)
+ARMNN_AUTO_TEST_CASE(ResizeBilinearMagNhwc, ResizeBilinearMagNhwcTest)
+
 // Constant
 ARMNN_AUTO_TEST_CASE(Constant, ConstantTest)
 ARMNN_AUTO_TEST_CASE(ConstantUint8, ConstantTestUint8)
diff --git a/src/backends/test/CreateWorkloadCl.cpp b/src/backends/test/CreateWorkloadCl.cpp
index e81f844..e7e39b0 100644
--- a/src/backends/test/CreateWorkloadCl.cpp
+++ b/src/backends/test/CreateWorkloadCl.cpp
@@ -596,5 +596,49 @@
     ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
 }
 
+template <typename ResizeBilinearWorkloadType, typename armnn::DataType DataType>
+static void ClResizeBilinearWorkloadTest(DataLayout dataLayout)
+{
+    Graph graph;
+    ClWorkloadFactory factory;
+
+    auto workload = CreateResizeBilinearWorkloadTest<ResizeBilinearWorkloadType, DataType>(factory, graph, dataLayout);
+
+    // Checks that inputs/outputs are as we expect them (see definition of CreateResizeBilinearWorkloadTest).
+    ResizeBilinearQueueDescriptor queueDescriptor = workload->GetData();
+    auto inputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
+    auto outputHandle = boost::polymorphic_downcast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
+
+    switch (dataLayout)
+    {
+        case DataLayout::NHWC:
+            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
+            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
+            break;
+        default: // NCHW
+            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
+            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
+    }
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NchwWorkload)
+{
+    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NchwWorkload)
+{
+    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32NhwcWorkload)
+{
+    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
+}
+
+BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16NhwcWorkload)
+{
+    ClResizeBilinearWorkloadTest<ClResizeBilinearFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
+}
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/test/LayerTests.cpp b/src/backends/test/LayerTests.cpp
index 066d0c2..a7fb6a8 100644
--- a/src/backends/test/LayerTests.cpp
+++ b/src/backends/test/LayerTests.cpp
@@ -2907,22 +2907,12 @@
     return Concatenation3dDim2DiffInputDimsTestImpl<float>(workloadFactory, 0.0f, 0);
 }
 
-LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
+LayerTestResult<float, 4> ResizeBilinearNopTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                    const armnn::TensorShape& inputOutputTensorShape,
+                                                    armnn::DataLayout dataLayout)
 {
-    constexpr unsigned int inputWidth = 4;
-    constexpr unsigned int inputHeight = 4;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
-
-    constexpr unsigned int outputWidth = inputWidth;
-    constexpr unsigned int outputHeight = inputHeight;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
-
-    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::Float32);
+    const armnn::TensorInfo inputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(inputOutputTensorShape, armnn::DataType::Float32);
 
     auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
         1.0f, 2.0f, 3.0f, 4.0f,
@@ -2938,6 +2928,68 @@
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workloadFactory.Finalize();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+LayerTestResult<float, 4> ResizeBilinearNopTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // BatchSize = 1, Channels = 1, Height = 4, Width = 4
+    const armnn::TensorShape inputOutputShape{ 1, 1, 4, 4 };
+
+    return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // BatchSize = 1, Height = 4, Width = 4, Channels = 1
+    const armnn::TensorShape inputOutputShape{ 1, 4, 4, 1 };
+
+    return ResizeBilinearNopTestImpl(workloadFactory, inputOutputShape, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> SimpleResizeBilinearTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                       const armnn::TensorShape& inputTensorShape,
+                                                       const armnn::TensorShape& outputTensorShape,
+                                                       armnn::DataLayout dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
+
+    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
+          1.0f, 255.0f,
+        200.0f, 250.0f
+    }));
+
+    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
+    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
+    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
+    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
+    // the centre).
+    LayerTestResult<float, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
+        1.0f
+    }));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -2957,40 +3009,54 @@
 
 LayerTestResult<float, 4> SimpleResizeBilinearTest(armnn::IWorkloadFactory& workloadFactory)
 {
-    constexpr unsigned int inputWidth = 2;
-    constexpr unsigned int inputHeight = 2;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
+    // inputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
+    const armnn::TensorShape inputShape{ 1, 1, 2, 2 };
 
-    constexpr unsigned int outputWidth = inputWidth / 2;
-    constexpr unsigned int outputHeight = inputHeight / 2;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
+    // outputShape: BatchSize = 1, Channels = 1, Height = 1, Width = 1
+    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
 
-    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::Float32);
+    return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // inputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
+    const armnn::TensorShape inputShape{ 1, 2, 2, 1 };
+
+    // outputShape: BatchSize = 1, Height = 1, Width = 1, Channels = 1
+    const armnn::TensorShape outputShape{ 1, 1, 1, 1 };
+
+    return SimpleResizeBilinearTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> ResizeBilinearSqMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                      const armnn::TensorShape& inputTensorShape,
+                                                      const armnn::TensorShape& outputTensorShape,
+                                                      armnn::DataLayout dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
 
     auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
-        1.0f, 255.0f,
-        200.0f, 250.f,
+        1.0f, 2.0f, 3.0f, 4.0f,
+        2.0f, 3.0f, 4.0f, 5.0f,
+        3.0f, 4.0f, 5.0f, 6.0f,
+        4.0f, 5.0f, 6.0f, 7.0f
     }));
 
-    // The 'resize bilinear' operation projects the top-left corner of output texels into the input image,
-    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
-    // output texel - and thus we'll expect the output 1x1 matrix to contain, as its single element, the value
-    // that was at position (0,0) of the input matrix (rather than an average, which we would expect if projecting
-    // the centre).
     LayerTestResult<float, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
-        1.0f
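+        // The scale factor is 2 in both dimensions and the top-left projection lands
+        // exactly on input texels, so out(i,j) = in(2i,2j): { 1, 3, 3, 5 }.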
+        1.0f, 3.0f,
+        3.0f, 5.0f
     }));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -3010,38 +3074,53 @@
 
 LayerTestResult<float, 4> ResizeBilinearSqMinTest(armnn::IWorkloadFactory& workloadFactory)
 {
-    constexpr unsigned int inputWidth = 4;
-    constexpr unsigned int inputHeight = 4;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
+    // inputShape: BatchSize = 1, Channels = 1, Height = 4, Width = 4
+    const armnn::TensorShape inputShape{ 1, 1, 4, 4 };
 
-    constexpr unsigned int outputWidth = inputWidth / 2;
-    constexpr unsigned int outputHeight = inputHeight / 2;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
+    // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 2
+    const armnn::TensorShape outputShape{ 1, 1, 2, 2 };
 
-    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::Float32);
+    return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // inputShape: BatchSize = 1, Height = 4, Width = 4, Channels = 1
+    const armnn::TensorShape inputShape{ 1, 4, 4, 1 };
+
+    // outputShape: BatchSize = 1, Height = 2, Width = 2, Channels = 1
+    const armnn::TensorShape outputShape{ 1, 2, 2, 1 };
+
+    return ResizeBilinearSqMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> ResizeBilinearMinTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                    const armnn::TensorShape& inputTensorShape,
+                                                    const armnn::TensorShape& outputTensorShape,
+                                                    armnn::DataLayout dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
 
     auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
-        1.0f, 2.0f, 3.0f, 4.0f,
-        2.0f, 3.0f, 4.0f, 5.0f,
-        3.0f, 4.0f, 5.0f, 6.0f,
-        4.0f, 5.0f, 6.0f, 7.0f
+          1.0f,   2.0f,   3.0f,   5.0f,   8.0f,
+         13.0f,  21.0f,  34.0f,  55.0f,  89.0f,
+        144.0f, 233.0f, 377.0f, 610.0f, 987.0f
     }));
 
     LayerTestResult<float, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
-        1.f, 3.f,
-        3.f, 5.f
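+        // Hand-derived with scales H: 3/2 and W: 5/3 (top-left projection), e.g.
+        // out(0,1) = 2 + 2/3 * (3 - 2) = 2.6666 and out(1,0) = 13 + 1/2 * (144 - 13) = 78.5.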
+         1.0f,   2.6666f,   6.0f,
+        78.5f, 179.3333f, 401.0f
     }));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -3061,37 +3138,54 @@
 
 LayerTestResult<float, 4> ResizeBilinearMinTest(armnn::IWorkloadFactory& workloadFactory)
 {
-    constexpr unsigned int inputWidth = 5;
-    constexpr unsigned int inputHeight = 3;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
+    // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
+    const armnn::TensorShape inputShape{ 1, 1, 3, 5 };
 
-    constexpr unsigned int outputWidth = 3;
-    constexpr unsigned int outputHeight = 2;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
+    // outputShape: BatchSize = 1, Channels = 1, Height = 2, Width = 3
+    const armnn::TensorShape outputShape{ 1, 1, 2, 3 };
 
-    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::Float32);
+    return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
+}
+
+LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // inputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
+    const armnn::TensorShape inputShape{ 1, 3, 5, 1 };
+
+    // outputShape: BatchSize = 1, Height = 2, Width = 3, Channels = 1
+    const armnn::TensorShape outputShape{ 1, 2, 3, 1 };
+
+    return ResizeBilinearMinTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
+}
+
+LayerTestResult<float, 4> ResizeBilinearMagTestImpl(armnn::IWorkloadFactory& workloadFactory,
+                                                    const armnn::TensorShape& inputTensorShape,
+                                                    const armnn::TensorShape& outputTensorShape,
+                                                    armnn::DataLayout dataLayout)
+{
+    const armnn::TensorInfo inputTensorInfo(inputTensorShape, armnn::DataType::Float32);
+    const armnn::TensorInfo outputTensorInfo(outputTensorShape, armnn::DataType::Float32);
 
     auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
-          1.0f,   2.0f,   3.0f,   5.0f,   8.0f,
-         13.0f,  21.0f,  34.0f,  55.0f,  89.0f,
-        144.0f, 233.0f, 377.0f, 610.0f, 987.0f
+          1.0f,   2.0f,
+         13.0f,  21.0f,
+        144.0f, 233.0f
     }));
 
     LayerTestResult<float, 4> result(outputTensorInfo);
     result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
-        1.0f, 2.6666f, 6.0f,
-        78.5f, 179.3333f, 401.f
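+        // Hand-derived with scales H: 3/3 = 1 and W: 2/5 (top-left projection): rows are
+        // preserved, e.g. out(0,1) = 1 + 2/5 * (2 - 1) = 1.4; x > 1 clamps to the last column.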
+          1.0f,   1.4f,   1.8f,   2.0f,   2.0f,
+         13.0f,  16.2f,  19.4f,  21.0f,  21.0f,
+        144.0f, 179.6f, 215.2f, 233.0f, 233.0f
     }));
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
 
     armnn::ResizeBilinearQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
     armnn::WorkloadInfo info;
     AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
     AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
@@ -3111,53 +3203,24 @@
 
 LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory)
 {
-    constexpr unsigned int inputWidth = 2;
-    constexpr unsigned int inputHeight = 3;
-    constexpr unsigned int inputChannels = 1;
-    constexpr unsigned int inputBatchSize = 1;
+    // inputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 2
+    const armnn::TensorShape inputShape{ 1, 1, 3, 2 };
 
-    constexpr unsigned int outputWidth = 5;
-    constexpr unsigned int outputHeight = 3;
-    constexpr unsigned int outputChannels = inputChannels;
-    constexpr unsigned int outputBatchSize = inputBatchSize;
+    // outputShape: BatchSize = 1, Channels = 1, Height = 3, Width = 5
+    const armnn::TensorShape outputShape{ 1, 1, 3, 5 };
 
-    const armnn::TensorInfo inputTensorInfo({ inputBatchSize, inputChannels, inputHeight, inputWidth },
-        armnn::DataType::Float32);
-    const armnn::TensorInfo outputTensorInfo({ outputBatchSize, outputChannels, outputHeight, outputWidth },
-        armnn::DataType::Float32);
+    return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NCHW);
+}
 
-    auto input = MakeTensor<float, 4>(inputTensorInfo, std::vector<float>({
-          1.0f,   2.0f,
-         13.0f,  21.0f,
-        144.0f, 233.0f
-    }));
+LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory)
+{
+    // inputShape: BatchSize = 1, Height = 3, Width = 2, Channels = 1
+    const armnn::TensorShape inputShape{ 1, 3, 2, 1 };
 
-    LayerTestResult<float, 4> result(outputTensorInfo);
-    result.outputExpected = MakeTensor<float, 4>(outputTensorInfo, std::vector<float>({
-         1.0f,   1.4f,   1.8f,   2.f,   2.f,
-         13.f,  16.2f,  19.4f,  21.f,  21.f,
-        144.f, 179.6f, 215.2f, 233.f, 233.f
-    }));
+    // outputShape: BatchSize = 1, Height = 3, Width = 5, Channels = 1
+    const armnn::TensorShape outputShape{ 1, 3, 5, 1 };
 
-    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
-    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
-
-    armnn::ResizeBilinearQueueDescriptor descriptor;
-    armnn::WorkloadInfo info;
-    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
-    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
-
-    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResizeBilinear(descriptor, info);
-
-    inputHandle->Allocate();
-    outputHandle->Allocate();
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
-
-    workloadFactory.Finalize();
-    workload->Execute();
-
-    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
-    return result;
+    return ResizeBilinearMagTestImpl(workloadFactory, inputShape, outputShape, armnn::DataLayout::NHWC);
 }
 
 LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory)
diff --git a/src/backends/test/LayerTests.hpp b/src/backends/test/LayerTests.hpp
index 12bcdd8..71a468b 100644
--- a/src/backends/test/LayerTests.hpp
+++ b/src/backends/test/LayerTests.hpp
@@ -243,6 +243,13 @@
 // Tests the resize bilinear for magnification (output dimensions bigger than input dimensions).
 LayerTestResult<float, 4> ResizeBilinearMagTest(armnn::IWorkloadFactory& workloadFactory);
 
+// Tests that execute ResizeBilinear with the NHWC data layout.
+LayerTestResult<float, 4> ResizeBilinearNopNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> SimpleResizeBilinearNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearSqMinNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearMinNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+LayerTestResult<float, 4> ResizeBilinearMagNhwcTest(armnn::IWorkloadFactory& workloadFactory);
+
 LayerTestResult<float, 4> BatchNormTest(armnn::IWorkloadFactory& workloadFactory);
 
 LayerTestResult<float, 2> FakeQuantizationTest(armnn::IWorkloadFactory& workloadFactory);