IVGCVSW-3365 Add reference workload support for ResizeLayer

Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Id551690065dca0686ce597d1f0c14fd73163481e
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 1d0be5d..e7915dd 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -915,12 +915,12 @@
     ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeBilinearQueueDescriptor", 4, "output");
 
     std::vector<DataType> supportedTypes =
-            {
-                    DataType::Float16,
-                    DataType::Float32,
-                    DataType::QuantisedAsymm8,
-                    DataType::QuantisedSymm16
-            };
+    {
+        DataType::Float16,
+        DataType::Float32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
 
     ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
                       supportedTypes,
@@ -931,29 +931,72 @@
                       "ResizeBilinearQueueDescriptor");
 
     // Resizes bilinear only changes width and height: batch and channel count must match.
+    const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
+    const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
+    if (inputBatchSize != outputBatchSize)
     {
-        const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
-        const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
-        if (inputBatchSize != outputBatchSize)
-        {
-            throw InvalidArgumentException(
-                boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
-                    "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
-        }
+        throw InvalidArgumentException(
+            boost::str(boost::format("ResizeBilinearQueueDescriptor: Input batch size (%1%) "
+                "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
     }
 
+    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
+    const unsigned int inputChannelCount =
+        workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
+    const unsigned int outputChannelCount =
+        workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
+    if (inputChannelCount != outputChannelCount)
     {
-        DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
-        const unsigned int inputChannelCount =
+        throw InvalidArgumentException(
+            boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
+                "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
+    }
+}
+
+void ResizeQueueDescriptor::Validate(const WorkloadInfo& workloadInfo) const
+{
+    ValidateNumInputs(workloadInfo, "ResizeQueueDescriptor", 1);
+    ValidateNumOutputs(workloadInfo, "ResizeQueueDescriptor", 1);
+
+    ValidateTensorNumDimensions(workloadInfo.m_InputTensorInfos[0], "ResizeQueueDescriptor", 4, "input");
+    ValidateTensorNumDimensions(workloadInfo.m_OutputTensorInfos[0], "ResizeQueueDescriptor", 4, "output");
+
+    std::vector<DataType> supportedTypes =
+    {
+        DataType::Float16,
+        DataType::Float32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    ValidateDataTypes(workloadInfo.m_InputTensorInfos[0],
+                      supportedTypes,
+                      "ResizeQueueDescriptor");
+
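+    // The output must have the same data type as the input.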
+    ValidateDataTypes(workloadInfo.m_OutputTensorInfos[0],
+                      {workloadInfo.m_InputTensorInfos[0].GetDataType()},
+                      "ResizeQueueDescriptor");
+
+    // Resize only changes width and height: batch and channel count must match.
+    const unsigned int inputBatchSize = workloadInfo.m_InputTensorInfos[0].GetShape()[0];
+    const unsigned int outputBatchSize = workloadInfo.m_OutputTensorInfos[0].GetShape()[0];
+    if (inputBatchSize != outputBatchSize)
+    {
+        throw InvalidArgumentException(
+                boost::str(boost::format("ResizeQueueDescriptor: Input batch size (%1%) "
+                           "does not match output batch size (%2%)") % inputBatchSize % outputBatchSize));
+    }
+
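+    // The channel dimension index depends on the data layout (1 for NCHW, 3 for NHWC).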
+    DataLayoutIndexed dimensionIndices(m_Parameters.m_DataLayout);
+    const unsigned int inputChannelCount =
             workloadInfo.m_InputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
-        const unsigned int outputChannelCount =
+    const unsigned int outputChannelCount =
             workloadInfo.m_OutputTensorInfos[0].GetShape()[dimensionIndices.GetChannelsIndex()];
-        if (inputChannelCount != outputChannelCount)
-        {
-            throw InvalidArgumentException(
-                boost::str(boost::format("ResizeBilinearQueueDescriptor: Input channel count (%1%) "
-                    "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
-        }
+    if (inputChannelCount != outputChannelCount)
+    {
+        throw InvalidArgumentException(
+                boost::str(boost::format("ResizeQueueDescriptor: Input channel count (%1%) "
+                           "does not match output channel count (%2%)") % inputChannelCount % outputChannelCount));
     }
 }
 
diff --git a/src/backends/backendsCommon/test/LayerTests.hpp b/src/backends/backendsCommon/test/LayerTests.hpp
index b225e4d..405ccff 100644
--- a/src/backends/backendsCommon/test/LayerTests.hpp
+++ b/src/backends/backendsCommon/test/LayerTests.hpp
@@ -873,7 +873,7 @@
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager);
 
 
-/// Tests that the output should be identical to the input when the output dimensions match the input ones.
+// Tests that the output is identical to the input when the output dimensions match the input ones.
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 4> ResizeBilinearNopTest(
         armnn::IWorkloadFactory& workloadFactory,
@@ -909,6 +909,42 @@
         const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
         const armnn::DataLayout  dataLayout);
 
+// Tests that the output is identical to the input when the output dimensions match the input ones.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeNearestNeighborNopTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+// Tests the behaviour of the resize NearestNeighbor operation when rescaling a 2x2 image into a 1x1 image.
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> SimpleResizeNearestNeighborTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+// Tests the resize NearestNeighbor for minification of a square input matrix (also: input dimensions are a
+// multiple of output dimensions).
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeNearestNeighborSqMinTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+// Tests the resize NearestNeighbor for minification (output dimensions smaller than input dimensions).
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeNearestNeighborMinTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
+// Tests the resize NearestNeighbor for magnification (output dimensions bigger than input dimensions).
+template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
+LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout);
+
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
 LayerTestResult<T, 2> Rsqrt2dTestCommon(
         armnn::IWorkloadFactory& workloadFactory,
@@ -2927,6 +2963,486 @@
     return result;
 }
 
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeNearestNeighborNopTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout)
+{
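+    // The quantized variants use a single channel, while the float variants use two channels
+    // (hence the two 4x4 blocks in the float input data below).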
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+                                        ?  armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
+                                        :  armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+                                         ?  armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
+                                         :  armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.5f);
+        inputTensorInfo.SetQuantizationOffset(-3);
+        outputTensorInfo.SetQuantizationScale(1.5f);
+        outputTensorInfo.SetQuantizationOffset(-3);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+                                   ? std::initializer_list<float>
+                                           {
+                                                   1, 2, 3, 4,
+                                                   2, 3, 4, 5,
+                                                   3, 4, 5, 6,
+                                                   4, 5, 6, 7
+                                           }
+                                   : std::initializer_list<float>
+                                           {
+                                                   1.0f, 2.0f, 3.0f, 4.0f,
+                                                   2.0f, 3.0f, 4.0f, 5.0f,
+                                                   3.0f, 4.0f, 5.0f, 6.0f,
+                                                   4.0f, 5.0f, 6.0f, 7.0f,
+
+                                                   1.0f, 2.0f, 3.0f, 4.0f,
+                                                   2.0f, 3.0f, 4.0f, 5.0f,
+                                                   3.0f, 4.0f, 5.0f, 6.0f,
+                                                   4.0f, 5.0f, 6.0f, 7.0f
+                                           };
+
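+    // The test data above is laid out in NCHW order; permute it into NHWC when that layout is requested.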
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = input;
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> SimpleResizeNearestNeighborTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+                                        ?  armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
+                                        :  armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+                                         ?  armnnUtils::GetTensorInfo(1, 1, 1, 1, dataLayout, ArmnnType)
+                                         :  armnnUtils::GetTensorInfo(1, 2, 1, 1, dataLayout, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(0.1567f);
+        inputTensorInfo.SetQuantizationOffset(1);
+        outputTensorInfo.SetQuantizationScale(0.1567f);
+        outputTensorInfo.SetQuantizationOffset(1);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+                                   ? std::initializer_list<float>
+                                           {
+                                                   1, 255,
+                                                   200, 250
+                                           }
+                                   : std::initializer_list<float>
+                                           {
+                                                   1.0f, 255.0f,
+                                                   200.0f, 250.0f,
+
+                                                   250.0f, 200.0f,
+                                                   250.0f,   1.0f
+                                           };
+
+    // The 'resize' operation projects the top-left corner of output texels into the input image,
+    // then figures out the interpolants and weights. Note this is different to projecting the centre of the
+    // output texel. Thus, for an input matrix of 2x2, we'll expect the output 1x1 matrix to contain, as
+    // its single element, the value that was at position (0,0) of the input matrix (rather than an average,
+    // which we would expect if projecting the centre).
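+    // Hence the expected output below is the value at position (0,0) of each input channel.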
+
+    std::vector<float> outputData = armnn::IsQuantizedType<T>()
+                                    ? std::initializer_list<float>
+                                            {
+                                                    1
+                                            }
+                                    : std::initializer_list<float>
+                                            {
+                                                    1.0f,
+
+                                                    250.0f
+                                            };
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                                                                outputTensorInfo.GetQuantizationOffset(),
+                                                                outputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeNearestNeighborSqMinTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+                                        ?  armnnUtils::GetTensorInfo(1, 1, 4, 4, dataLayout, ArmnnType)
+                                        :  armnnUtils::GetTensorInfo(1, 2, 4, 4, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+                                         ?  armnnUtils::GetTensorInfo(1, 1, 2, 2, dataLayout, ArmnnType)
+                                         :  armnnUtils::GetTensorInfo(1, 2, 2, 2, dataLayout, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(3.141592f);
+        inputTensorInfo.SetQuantizationOffset(3);
+        outputTensorInfo.SetQuantizationScale(3.141592f);
+        outputTensorInfo.SetQuantizationOffset(3);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+                                   ? std::initializer_list<float>
+                                           {
+                                                  1, 2, 3, 4,
+                                                  2, 3, 4, 5,
+                                                  3, 4, 5, 6,
+                                                  4, 5, 6, 7
+                                           }
+                                   : std::initializer_list<float>
+                                           {
+                                                   1.0f, 2.0f, 3.0f, 4.0f,
+                                                   2.0f, 3.0f, 4.0f, 5.0f,
+                                                   3.0f, 4.0f, 5.0f, 6.0f,
+                                                   4.0f, 5.0f, 6.0f, 7.0f,
+
+                                                   7.0f, 6.0f, 5.0f, 4.0f,
+                                                   6.0f, 5.0f, 4.0f, 3.0f,
+                                                   5.0f, 4.0f, 3.0f, 2.0f,
+                                                   4.0f, 3.0f, 2.0f, 1.0f
+                                           };
+
+    std::vector<float> outputData = armnn::IsQuantizedType<T>()
+                                    ? std::initializer_list<float>
+                                            {
+                                                    1, 3,
+                                                    3, 5
+                                            }
+                                    : std::initializer_list<float>
+                                            {
+                                                    1.0f, 3.0f,
+                                                    3.0f, 5.0f,
+
+                                                    7.0f, 5.0f,
+                                                    5.0f, 3.0f
+                                            };
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                                                                outputTensorInfo.GetQuantizationOffset(),
+                                                                outputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeNearestNeighborMinTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+                                        ?  armnnUtils::GetTensorInfo(1, 1, 2, 3, dataLayout, ArmnnType)
+                                        :  armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+                                         ?  armnnUtils::GetTensorInfo(1, 1, 1, 2, dataLayout, ArmnnType)
+                                         :  armnnUtils::GetTensorInfo(1, 2, 2, 3, dataLayout, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(1.5f);
+        inputTensorInfo.SetQuantizationOffset(-1);
+        outputTensorInfo.SetQuantizationScale(1.5f);
+        outputTensorInfo.SetQuantizationOffset(-1);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+                                   ? std::initializer_list<float>
+                                           {
+                                                   3.0f, 4.5f, 6.0f, // 1,  2,  3, : Expected quantised values
+                                                   9.0f, 13.5f, 21.0f // 5,  8, 13
+                                           }
+                                   : std::initializer_list<float>
+                                           {
+                                                   1.0f, 2.0f, 3.0f, 5.0f, 8.0f,
+                                                   13.0f, 21.0f, 34.0f, 55.0f, 89.0f,
+                                                   144.0f, 233.0f, 377.0f, 610.0f, 987.0f,
+
+                                                   987.0f, 610.0f, 377.0f, 233.0f, 144.0f,
+                                                   89.0f, 55.0f, 34.0f, 21.0f, 13.0f,
+                                                   8.0f, 5.0f, 3.0f, 2.0f, 1.0f
+                                           };
+
+    std::vector<float> outputData = armnn::IsQuantizedType<T>()
+                                    ? std::initializer_list<float>
+                                            {
+                                                    3.0f, 4.5f // 1, 2
+                                            }
+                                    : std::initializer_list<float>
+                                            {
+                                                    1.f,   2.f,   5.f,
+                                                   13.f,  21.f,  55.f,
+
+                                                  987.f, 610.f, 233.f,
+                                                   89.f,  55.f,  21.f
+                                            };
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                                                                outputTensorInfo.GetQuantizationOffset(),
+                                                                outputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+template<armnn::DataType ArmnnType, typename T>
+LayerTestResult<T, 4> ResizeNearestNeighborMagTest(
+        armnn::IWorkloadFactory& workloadFactory,
+        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
+        const armnn::DataLayout dataLayout)
+{
+    armnn::TensorInfo inputTensorInfo = armnn::IsQuantizedType<T>()
+                                        ?  armnnUtils::GetTensorInfo(1, 1, 3, 2, dataLayout, ArmnnType)
+                                        :  armnnUtils::GetTensorInfo(1, 2, 3, 2, dataLayout, ArmnnType);
+    armnn::TensorInfo outputTensorInfo = armnn::IsQuantizedType<T>()
+                                         ?  armnnUtils::GetTensorInfo(1, 1, 3, 5, dataLayout, ArmnnType)
+                                         :  armnnUtils::GetTensorInfo(1, 2, 3, 5, dataLayout, ArmnnType);
+
+    if (armnn::IsQuantizedType<T>())
+    {
+        inputTensorInfo.SetQuantizationScale(0.010765f);
+        inputTensorInfo.SetQuantizationOffset(7);
+        outputTensorInfo.SetQuantizationScale(0.010132f);
+        outputTensorInfo.SetQuantizationOffset(-18);
+    }
+
+    std::vector<float> inputData = armnn::IsQuantizedType<T>()
+                                   ? std::initializer_list<float>
+                                           {
+                                                   0.183005f, 2.379065f, // 24, 228, : Expected quantised values
+                                                   1.05497f, 1.302565f, // 105, 128,
+                                                   2.400595f, 0.68896f // 230, 71
+                                           }
+                                   : std::initializer_list<float>
+                                           {
+                                                   1.0f,   2.0f,
+                                                   13.0f,  21.0f,
+                                                   144.0f, 233.0f,
+
+                                                   233.0f, 144.0f,
+                                                   21.0f,  13.0f,
+                                                   2.0f,   1.0f
+                                           };
+    std::vector<float> outputData = armnn::IsQuantizedType<T>()
+                                    ? std::initializer_list<float>
+                                            {
+                                                    0.183005f, 0.183005f, 0.183005f, 2.379065f, 2.379065f,
+                                                    1.05497f,  1.05497f,  1.05497f,  1.302565f, 1.302565f,
+                                                    2.400595f, 2.400595f, 2.400595f, 0.68896f,  0.68896f
+                                            }
+                                    : std::initializer_list<float>
+                                            {
+                                                      1.f,   1.f,   1.f,   2.f,   2.f,
+                                                     13.f,  13.f,  13.f,  21.f,  21.f,
+                                                    144.f, 144.f, 144.f, 233.f, 233.f,
+
+                                                    233.f, 233.f, 233.f, 144.f, 144.f,
+                                                     21.f,  21.f,  21.f,  13.f,  13.f,
+                                                      2.f,   2.f,   2.f,   1.f,   1.f
+                                            };
+
+    const armnn::PermutationVector NCHWToNHWC = { 0, 3, 1, 2 };
+    if (dataLayout == armnn::DataLayout::NHWC)
+    {
+        std::vector<float> tmp(inputData.size());
+        armnnUtils::Permute(inputTensorInfo.GetShape(), NCHWToNHWC, inputData.data(), tmp.data(), sizeof(float));
+        inputData = tmp;
+
+        std::vector<float> tmp1(outputData.size());
+        armnnUtils::Permute(outputTensorInfo.GetShape(), NCHWToNHWC, outputData.data(), tmp1.data(), sizeof(float));
+        outputData = tmp1;
+    }
+
+    auto input = MakeTensor<T, 4>(inputTensorInfo, QuantizedVector<T>(inputTensorInfo.GetQuantizationScale(),
+                                                                      inputTensorInfo.GetQuantizationOffset(),
+                                                                      inputData));
+
+    LayerTestResult<T, 4> result(outputTensorInfo);
+    result.outputExpected = MakeTensor<T, 4>(outputTensorInfo,
+                                             QuantizedVector<T>(outputTensorInfo.GetQuantizationScale(),
+                                                                outputTensorInfo.GetQuantizationOffset(),
+                                                                outputData));
+
+    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
+    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
+
+    armnn::ResizeQueueDescriptor descriptor;
+    descriptor.m_Parameters.m_DataLayout = dataLayout;
+    descriptor.m_Parameters.m_Method = armnn::ResizeMethod::NearestNeighbor;
+    armnn::WorkloadInfo info;
+    AddInputToWorkload(descriptor, info, inputTensorInfo, inputHandle.get());
+    AddOutputToWorkload(descriptor, info, outputTensorInfo, outputHandle.get());
+
+    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateResize(descriptor, info);
+
+    inputHandle->Allocate();
+    outputHandle->Allocate();
+    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+
+    workload->PostAllocationConfigure();
+    workload->Execute();
+
+    CopyDataFromITensorHandle(&result.output[0][0][0][0], outputHandle.get());
+    return result;
+}
+
+
 template<armnn::DataType ArmnnType, typename T, std::size_t InputDim, std::size_t OutputDim>
 LayerTestResult<T, OutputDim> MeanTestHelper(
         armnn::IWorkloadFactory& workloadFactory,
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 429993a..b563bad 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1239,11 +1239,11 @@
 {
     bool supported = true;
     std::array<DataType,3> supportedTypes =
-            {
-                    DataType::Float32,
-                    DataType::QuantisedAsymm8,
-                    DataType::QuantisedSymm16
-            };
+    {
+        DataType::Float32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
 
     supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
                                   "Reference ResizeBilinear: input type not supported");
@@ -1257,6 +1257,31 @@
     return supported;
 }
 
+bool RefLayerSupport::IsResizeSupported(const TensorInfo& input,
+                                        const TensorInfo& output,
+                                        const ResizeDescriptor& descriptor,
+                                        Optional<std::string&> reasonIfUnsupported) const
+{
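+    // The descriptor is not needed for these type checks. ignore_unused is assumed to be in
+    // scope here, as it is used elsewhere in this file for unused descriptor parameters.
+    ignore_unused(descriptor);
+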
+    bool supported = true;
+    std::array<DataType,3> supportedTypes =
+    {
+        DataType::Float32,
+        DataType::QuantisedAsymm8,
+        DataType::QuantisedSymm16
+    };
+
+    supported &= CheckSupportRule(TypeAnyOf(input, supportedTypes), reasonIfUnsupported,
+                                  "Reference Resize: input type not supported");
+
+    supported &= CheckSupportRule(TypeAnyOf(output, supportedTypes), reasonIfUnsupported,
+                                  "Reference Resize: output type not supported");
+
+    supported &= CheckSupportRule(TypesAreEqual(input, output), reasonIfUnsupported,
+                                  "Reference Resize: input and output types not matching");
+
+    return supported;
+}
+
 bool RefLayerSupport::IsRsqrtSupported(const TensorInfo& input,
                                        const TensorInfo& output,
                                        Optional<std::string&> reasonIfUnsupported) const
diff --git a/src/backends/reference/RefLayerSupport.hpp b/src/backends/reference/RefLayerSupport.hpp
index 9c397fe..22b007b 100644
--- a/src/backends/reference/RefLayerSupport.hpp
+++ b/src/backends/reference/RefLayerSupport.hpp
@@ -222,6 +222,11 @@
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsResizeSupported(const TensorInfo& input,
+                           const TensorInfo& output,
+                           const ResizeDescriptor& descriptor,
+                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsRsqrtSupported(const TensorInfo& input,
                           const TensorInfo& output,
                           Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
diff --git a/src/backends/reference/RefWorkloadFactory.cpp b/src/backends/reference/RefWorkloadFactory.cpp
index d906f93..95a4419 100644
--- a/src/backends/reference/RefWorkloadFactory.cpp
+++ b/src/backends/reference/RefWorkloadFactory.cpp
@@ -239,6 +239,16 @@
     return std::make_unique<CopyMemGenericWorkload>(descriptor, info);
 }
 
+std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResize(const ResizeQueueDescriptor& descriptor,
+                                                            const WorkloadInfo& info) const
+{
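+    // The reference backend has no Float16 implementation of Resize, so a NullWorkload is
+    // returned for Float16 tensors.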
+    if (IsFloat16(info))
+    {
+        return MakeWorkload<NullWorkload, NullWorkload>(descriptor, info);
+    }
+    return std::make_unique<RefResizeWorkload>(descriptor, info);
+}
+
 std::unique_ptr<IWorkload> RefWorkloadFactory::CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                                     const WorkloadInfo& info) const
 {
diff --git a/src/backends/reference/RefWorkloadFactory.hpp b/src/backends/reference/RefWorkloadFactory.hpp
index 44cb079..1a40259 100644
--- a/src/backends/reference/RefWorkloadFactory.hpp
+++ b/src/backends/reference/RefWorkloadFactory.hpp
@@ -106,6 +106,9 @@
     std::unique_ptr<IWorkload> CreateMemCopy(const MemCopyQueueDescriptor& descriptor,
                                              const WorkloadInfo& info) const override;
 
+    std::unique_ptr<IWorkload> CreateResize(const ResizeQueueDescriptor& descriptor,
+                                            const WorkloadInfo& info) const override;
+
     std::unique_ptr<IWorkload> CreateResizeBilinear(const ResizeBilinearQueueDescriptor& descriptor,
                                                     const WorkloadInfo& info) const override;
 
diff --git a/src/backends/reference/backend.mk b/src/backends/reference/backend.mk
index 6fb17b5..7995654 100644
--- a/src/backends/reference/backend.mk
+++ b/src/backends/reference/backend.mk
@@ -54,6 +54,7 @@
         workloads/RefQuantizeWorkload.cpp \
         workloads/RefReshapeWorkload.cpp \
         workloads/RefResizeBilinearWorkload.cpp \
+        workloads/RefResizeWorkload.cpp \
         workloads/RefRsqrtWorkload.cpp \
         workloads/RefSoftmaxWorkload.cpp \
         workloads/RefSpaceToBatchNdWorkload.cpp \
@@ -61,7 +62,7 @@
         workloads/RefStridedSliceWorkload.cpp \
         workloads/RefSplitterWorkload.cpp \
         workloads/RefTransposeConvolution2dWorkload.cpp \
-        workloads/ResizeBilinear.cpp \
+        workloads/Resize.cpp \
         workloads/Rsqrt.cpp \
         workloads/SpaceToBatchNd.cpp \
         workloads/SpaceToDepth.cpp \
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index 80d5319..7797f17 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -607,6 +607,100 @@
                      ResizeBilinearNopTest<armnn::DataType::QuantisedSymm16>,
                      armnn::DataLayout::NHWC)
 
+// Resize NearestNeighbor - NCHW
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighbor,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedSymm16>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNop,
+                     ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8,
+                     ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16,
+                     ResizeNearestNeighborNopTest<armnn::DataType::QuantisedSymm16>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMin,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedSymm16>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMin,
+                     ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8,
+                     ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16,
+                     ResizeNearestNeighborMinTest<armnn::DataType::QuantisedSymm16>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMag,
+                     ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8,
+                     ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NCHW)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16,
+                     ResizeNearestNeighborMagTest<armnn::DataType::QuantisedSymm16>,
+                     armnn::DataLayout::NCHW)
+
+// Resize NearestNeighbor - NHWC
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopNhwc,
+                     ResizeNearestNeighborNopTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint8Nhwc,
+                     ResizeNearestNeighborNopTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborNopUint16Nhwc,
+                     ResizeNearestNeighborNopTest<armnn::DataType::QuantisedSymm16>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborNhwc,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint8Nhwc,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(SimpleResizeNearestNeighborUint16Nhwc,
+                     SimpleResizeNearestNeighborTest<armnn::DataType::QuantisedSymm16>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinNhwc,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint8Nhwc,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborSqMinUint16Nhwc,
+                     ResizeNearestNeighborSqMinTest<armnn::DataType::QuantisedSymm16>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinNhwc,
+                     ResizeNearestNeighborMinTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint8Nhwc,
+                     ResizeNearestNeighborMinTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMinUint16Nhwc,
+                     ResizeNearestNeighborMinTest<armnn::DataType::QuantisedSymm16>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagNhwc,
+                     ResizeNearestNeighborMagTest<armnn::DataType::Float32>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint8Nhwc,
+                     ResizeNearestNeighborMagTest<armnn::DataType::QuantisedAsymm8>,
+                     armnn::DataLayout::NHWC)
+ARMNN_AUTO_TEST_CASE(ResizeNearestNeighborMagUint16Nhwc,
+                     ResizeNearestNeighborMagTest<armnn::DataType::QuantisedSymm16>,
+                     armnn::DataLayout::NHWC)
+
 // Fake Quantization
 ARMNN_AUTO_TEST_CASE(FakeQuantization, FakeQuantizationTest)
 
diff --git a/src/backends/reference/workloads/CMakeLists.txt b/src/backends/reference/workloads/CMakeLists.txt
index 9be245b..3c0af01 100644
--- a/src/backends/reference/workloads/CMakeLists.txt
+++ b/src/backends/reference/workloads/CMakeLists.txt
@@ -96,6 +96,8 @@
     RefReshapeWorkload.hpp
     RefResizeBilinearWorkload.cpp
     RefResizeBilinearWorkload.hpp
+    RefResizeWorkload.cpp
+    RefResizeWorkload.hpp
     RefRsqrtWorkload.cpp
     RefRsqrtWorkload.hpp
     RefSoftmaxWorkload.cpp
@@ -112,8 +114,8 @@
     RefTransposeConvolution2dWorkload.hpp
     RefWorkloads.hpp
     RefWorkloadUtils.hpp
-    ResizeBilinear.cpp
-    ResizeBilinear.hpp
+    Resize.cpp
+    Resize.hpp
     Rsqrt.cpp
     Rsqrt.hpp
     Softmax.cpp
diff --git a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
index 03fcec2..fc27c0f 100644
--- a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
+++ b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
@@ -6,7 +6,7 @@
 #include "RefResizeBilinearWorkload.hpp"
 
 #include "RefWorkloadUtils.hpp"
-#include "ResizeBilinear.hpp"
+#include "Resize.hpp"
 #include "BaseIterator.hpp"
 #include "Profiling.hpp"
 
@@ -29,7 +29,7 @@
     std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
     Encoder<float> &encoder = *encoderPtr;
 
-    ResizeBilinear(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout);
+    Resize(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout, armnn::ResizeMethod::Bilinear);
 }
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/RefResizeWorkload.cpp b/src/backends/reference/workloads/RefResizeWorkload.cpp
new file mode 100644
index 0000000..26225f8
--- /dev/null
+++ b/src/backends/reference/workloads/RefResizeWorkload.cpp
@@ -0,0 +1,35 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefResizeWorkload.hpp"
+
+#include "RefWorkloadUtils.hpp"
+#include "Resize.hpp"
+#include "BaseIterator.hpp"
+#include "Profiling.hpp"
+
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+namespace armnn
+{
+
+void RefResizeWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeWorkload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
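+    // The Decoder/Encoder pair converts between the tensor's data type and float, so a single
+    // Resize implementation serves all supported types (Float32, QAsymm8, QSymm16).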
+    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+    Decoder<float> &decoder = *decoderPtr;
+    std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
+    Encoder<float> &encoder = *encoderPtr;
+
+    Resize(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout, m_Data.m_Parameters.m_Method);
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefResizeWorkload.hpp b/src/backends/reference/workloads/RefResizeWorkload.hpp
new file mode 100644
index 0000000..1ddfcdf
--- /dev/null
+++ b/src/backends/reference/workloads/RefResizeWorkload.hpp
@@ -0,0 +1,21 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+#include <backendsCommon/WorkloadData.hpp>
+
+namespace armnn
+{
+
+class RefResizeWorkload : public BaseWorkload<ResizeQueueDescriptor>
+{
+public:
+    using BaseWorkload<ResizeQueueDescriptor>::BaseWorkload;
+    virtual void Execute() const override;
+};
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/RefWorkloads.hpp b/src/backends/reference/workloads/RefWorkloads.hpp
index 3a094c8..4bdf05d 100644
--- a/src/backends/reference/workloads/RefWorkloads.hpp
+++ b/src/backends/reference/workloads/RefWorkloads.hpp
@@ -40,6 +40,7 @@
 #include "RefPreluWorkload.hpp"
 #include "RefQuantizeWorkload.hpp"
 #include "RefResizeBilinearWorkload.hpp"
+#include "RefResizeWorkload.hpp"
 #include "RefRsqrtWorkload.hpp"
 #include "RefReshapeWorkload.hpp"
 #include "RefSplitterWorkload.hpp"
@@ -49,7 +50,7 @@
 #include "RefSpaceToDepthWorkload.hpp"
 #include "RefTransposeConvolution2dWorkload.hpp"
 #include "RefWorkloadUtils.hpp"
-#include "ResizeBilinear.hpp"
+#include "Resize.hpp"
 #include "Softmax.hpp"
 #include "Splitter.hpp"
 #include "TensorBufferArrayView.hpp"
\ No newline at end of file
diff --git a/src/backends/reference/workloads/Resize.cpp b/src/backends/reference/workloads/Resize.cpp
new file mode 100644
index 0000000..0e0bdd7
--- /dev/null
+++ b/src/backends/reference/workloads/Resize.cpp
@@ -0,0 +1,130 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "Resize.hpp"
+
+#include "TensorBufferArrayView.hpp"
+
+#include <boost/numeric/conversion/cast.hpp>
+
+#include <cmath>
+#include <algorithm>
+
+using namespace armnnUtils;
+
+namespace armnn
+{
+
+namespace
+{
+
+inline float Lerp(float a, float b, float w)
+{
+    return w * b + (1.f - w) * a;
+}
+
+} // anonymous namespace
+
+void Resize(Decoder<float>&   in,
+            const TensorInfo& inputInfo,
+            Encoder<float>&   out,
+            const TensorInfo& outputInfo,
+            DataLayoutIndexed dataLayout,
+            armnn::ResizeMethod resizeMethod)
+{
+    // We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output
+    // image is projected into the input image to figure out the interpolants and weights. Note that this
+    // will yield different results than if projecting the centre of output texels.
+
+    const unsigned int batchSize = inputInfo.GetShape()[0];
+    const unsigned int channelCount = inputInfo.GetShape()[dataLayout.GetChannelsIndex()];
+
+    const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
+    const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
+    const unsigned int outputHeight = outputInfo.GetShape()[dataLayout.GetHeightIndex()];
+    const unsigned int outputWidth = outputInfo.GetShape()[dataLayout.GetWidthIndex()];
+
+    // How much to scale pixel coordinates in the output image, to get the corresponding pixel coordinates
+    // in the input image.
+    const float scaleY = boost::numeric_cast<float>(inputHeight) / boost::numeric_cast<float>(outputHeight);
+    const float scaleX = boost::numeric_cast<float>(inputWidth) / boost::numeric_cast<float>(outputWidth);
+
+    TensorShape inputShape =  inputInfo.GetShape();
+    TensorShape outputShape =  outputInfo.GetShape();
+
+    for (unsigned int n = 0; n < batchSize; ++n)
+    {
+        for (unsigned int c = 0; c < channelCount; ++c)
+        {
+            for (unsigned int y = 0; y < outputHeight; ++y)
+            {
+                // Corresponding real-valued height coordinate in input image.
+                const float iy = boost::numeric_cast<float>(y) * scaleY;
+
+                // Discrete height coordinate of top-left texel (in the 2x2 texel area used for interpolation).
+                const float fiy = floorf(iy);
+                const unsigned int y0 = boost::numeric_cast<unsigned int>(fiy);
+
+                // Interpolation weight (range [0,1]).
+                const float yw = iy - fiy;
+
+                for (unsigned int x = 0; x < outputWidth; ++x)
+                {
+                    // Real-valued and discrete width coordinates in input image.
+                    const float ix = boost::numeric_cast<float>(x) * scaleX;
+                    const float fix = floorf(ix);
+                    const unsigned int x0 = boost::numeric_cast<unsigned int>(fix);
+
+                    // Interpolation weight (range [0,1]).
+                    const float xw = ix - fix;
+
+                    // Discrete width/height coordinates of texels below and to the right of (x0, y0).
+                    const unsigned int x1 = std::min(x0 + 1, inputWidth - 1u);
+                    const unsigned int y1 = std::min(y0 + 1, inputHeight - 1u);
+
+                    float interpolatedValue;
+                    switch (resizeMethod)
+                    {
+                        case armnn::ResizeMethod::Bilinear:
+                        {
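+                            // Fetch the four texels of the 2x2 interpolation area.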
+                            in[dataLayout.GetIndex(inputShape, n, c, y0, x0)];
+                            float input1 = in.Get();
+                            in[dataLayout.GetIndex(inputShape, n, c, y0, x1)];
+                            float input2 = in.Get();
+                            in[dataLayout.GetIndex(inputShape, n, c, y1, x0)];
+                            float input3 = in.Get();
+                            in[dataLayout.GetIndex(inputShape, n, c, y1, x1)];
+                            float input4 = in.Get();
+
+                            const float ly0 = Lerp(input1, input2, xw); // lerp along row y0.
+                            const float ly1 = Lerp(input3, input4, xw); // lerp along row y1.
+                            interpolatedValue = Lerp(ly0, ly1, yw);
+                            break;
+                        }
+                        case armnn::ResizeMethod::NearestNeighbor:
+                        default:
+                        {
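+                            // Note: fix == x0 and fiy == y0 by construction, so distance0 is
+                            // always zero and the top-left texel (x0, y0) is selected. This is
+                            // consistent with the top-left-corner projection described above.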
+                            auto distance0 = std::sqrt(std::pow(fix - boost::numeric_cast<float>(x0), 2) +
+                                                       std::pow(fiy - boost::numeric_cast<float>(y0), 2));
+                            auto distance1 = std::sqrt(std::pow(fix - boost::numeric_cast<float>(x1), 2) +
+                                                       std::pow(fiy - boost::numeric_cast<float>(y1), 2));
+
+                            unsigned int xNearest = distance0 <= distance1 ? x0 : x1;
+                            unsigned int yNearest = distance0 <= distance1 ? y0 : y1;
+
+                            in[dataLayout.GetIndex(inputShape, n, c, yNearest, xNearest)];
+                            interpolatedValue = in.Get();
+                            break;
+                        }
+                    }
+                    out[dataLayout.GetIndex(outputShape, n, c, y, x)];
+                    out.Set(interpolatedValue);
+                }
+            }
+        }
+    }
+}
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/Resize.hpp b/src/backends/reference/workloads/Resize.hpp
new file mode 100644
index 0000000..8bd8999
--- /dev/null
+++ b/src/backends/reference/workloads/Resize.hpp
@@ -0,0 +1,23 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include "BaseIterator.hpp"
+#include <armnn/Tensor.hpp>
+
+#include <DataLayoutIndexed.hpp>
+
+namespace armnn
+{
+
+void Resize(Decoder<float>&               in,
+            const TensorInfo&             inputInfo,
+            Encoder<float>&               out,
+            const TensorInfo&             outputInfo,
+            armnnUtils::DataLayoutIndexed dataLayout = DataLayout::NCHW,
+            ResizeMethod                  resizeMethod = ResizeMethod::NearestNeighbor);
+
+} //namespace armnn
diff --git a/src/backends/reference/workloads/ResizeBilinear.cpp b/src/backends/reference/workloads/ResizeBilinear.cpp
deleted file mode 100644
index 70a0514..0000000
--- a/src/backends/reference/workloads/ResizeBilinear.cpp
+++ /dev/null
@@ -1,108 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#include "ResizeBilinear.hpp"
-
-#include "TensorBufferArrayView.hpp"
-
-#include <boost/numeric/conversion/cast.hpp>
-
-#include <cmath>
-#include <algorithm>
-
-using namespace armnnUtils;
-
-namespace armnn
-{
-
-namespace
-{
-
-inline float Lerp(float a, float b, float w)
-{
-    return w * b + (1.f - w) * a;
-}
-
-}
-
-void ResizeBilinear(Decoder<float>&   in,
-                    const TensorInfo& inputInfo,
-                    Encoder<float>&   out,
-                    const TensorInfo& outputInfo,
-                    DataLayoutIndexed dataLayout)
-{
-    // We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output
-    // image is projected into the input image to figure out the interpolants and weights. Note that this
-    // will yield different results than if projecting the centre of output texels.
-
-    const unsigned int batchSize = inputInfo.GetShape()[0];
-    const unsigned int channelCount = inputInfo.GetShape()[dataLayout.GetChannelsIndex()];
-
-    const unsigned int inputHeight = inputInfo.GetShape()[dataLayout.GetHeightIndex()];
-    const unsigned int inputWidth = inputInfo.GetShape()[dataLayout.GetWidthIndex()];
-    const unsigned int outputHeight = outputInfo.GetShape()[dataLayout.GetHeightIndex()];
-    const unsigned int outputWidth = outputInfo.GetShape()[dataLayout.GetWidthIndex()];
-
-    // How much to scale pixel coordinates in the output image, to get the corresponding pixel coordinates
-    // in the input image.
-    const float scaleY = boost::numeric_cast<float>(inputHeight) / boost::numeric_cast<float>(outputHeight);
-    const float scaleX = boost::numeric_cast<float>(inputWidth) / boost::numeric_cast<float>(outputWidth);
-
-    TensorShape inputShape =  inputInfo.GetShape();
-    TensorShape outputShape =  outputInfo.GetShape();
-
-    for (unsigned int n = 0; n < batchSize; ++n)
-    {
-        for (unsigned int c = 0; c < channelCount; ++c)
-        {
-            for (unsigned int y = 0; y < outputHeight; ++y)
-            {
-                // Corresponding real-valued height coordinate in input image.
-                const float iy = boost::numeric_cast<float>(y) * scaleY;
-
-                // Discrete height coordinate of top-left texel (in the 2x2 texel area used for interpolation).
-                const float fiy = floorf(iy);
-                const unsigned int y0 = boost::numeric_cast<unsigned int>(fiy);
-
-                // Interpolation weight (range [0,1]).
-                const float yw = iy - fiy;
-
-                for (unsigned int x = 0; x < outputWidth; ++x)
-                {
-                    // Real-valued and discrete width coordinates in input image.
-                    const float ix = boost::numeric_cast<float>(x) * scaleX;
-                    const float fix = floorf(ix);
-                    const unsigned int x0 = boost::numeric_cast<unsigned int>(fix);
-
-                    // Interpolation weight (range [0,1]).
-                    const float xw = ix - fix;
-
-                    // Discrete width/height coordinates of texels below and to the right of (x0, y0).
-                    const unsigned int x1 = std::min(x0 + 1, inputWidth - 1u);
-                    const unsigned int y1 = std::min(y0 + 1, inputHeight - 1u);
-
-                    // Interpolation
-                    in[dataLayout.GetIndex(inputShape, n, c, y0, x0)];
-                    float input1 = in.Get();
-                    in[dataLayout.GetIndex(inputShape, n, c, y0, x1)];
-                    float input2 = in.Get();
-                    in[dataLayout.GetIndex(inputShape, n, c, y1, x0)];
-                    float input3 = in.Get();
-                    in[dataLayout.GetIndex(inputShape, n, c, y1, x1)];
-                    float input4 = in.Get();
-
-                    const float ly0 = Lerp(input1, input2, xw); // lerp along row y0.
-                    const float ly1 = Lerp(input3, input4, xw); // lerp along row y1.
-                    const float l = Lerp(ly0, ly1, yw);
-
-                    out[dataLayout.GetIndex(outputShape, n, c, y, x)];
-                    out.Set(l);
-                }
-            }
-        }
-    }
-}
-
-} //namespace armnn
diff --git a/src/backends/reference/workloads/ResizeBilinear.hpp b/src/backends/reference/workloads/ResizeBilinear.hpp
deleted file mode 100644
index ad2e487..0000000
--- a/src/backends/reference/workloads/ResizeBilinear.hpp
+++ /dev/null
@@ -1,22 +0,0 @@
-//
-// Copyright © 2017 Arm Ltd. All rights reserved.
-// SPDX-License-Identifier: MIT
-//
-
-#pragma once
-
-#include "BaseIterator.hpp"
-#include <armnn/Tensor.hpp>
-
-#include <DataLayoutIndexed.hpp>
-
-namespace armnn
-{
-
-void ResizeBilinear(Decoder<float>&               in,
-                    const TensorInfo&             inputInfo,
-                    Encoder<float>&               out,
-                    const TensorInfo&             outputInfo,
-                    armnnUtils::DataLayoutIndexed dataLayout = DataLayout::NCHW);
-
-} //namespace armnn