IVGCVSW-3218 Refactor the Reference Workloads for the ResizeBilinear layer

 * Refactored the ResizeBilinear reference workloads to combine the separate Float32 and Uint8 files into a single workload

Signed-off-by: Ellen Norris-Thompson <ellen.norris-thompson@arm.com>
Change-Id: I725a830f4c4755a7d3a37ca68e31e44e7eb267cb
diff --git a/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
new file mode 100644
index 0000000..03fcec2
--- /dev/null
+++ b/src/backends/reference/workloads/RefResizeBilinearWorkload.cpp
@@ -0,0 +1,36 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "RefResizeBilinearWorkload.hpp"
+
+#include "RefWorkloadUtils.hpp"
+#include "ResizeBilinear.hpp"
+#include "BaseIterator.hpp"
+#include "Profiling.hpp"
+
+#include "BaseIterator.hpp"
+#include "Decoders.hpp"
+#include "Encoders.hpp"
+
+namespace armnn
+{
+
+void RefResizeBilinearWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT(Compute::CpuRef, "RefResizeBilinearWorkload_Execute");
+
+    const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
+    const TensorInfo& outputInfo = GetTensorInfo(m_Data.m_Outputs[0]);
+
+    // Decoders and Encoders abstract the tensor's underlying data type, so this
+    // single float-based implementation handles both Float32 and Uint8 tensors.
+    std::unique_ptr<Decoder<float>> decoderPtr = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
+    Decoder<float>& decoder = *decoderPtr;
+    std::unique_ptr<Encoder<float>> encoderPtr = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
+    Encoder<float>& encoder = *encoderPtr;
+
+    ResizeBilinear(decoder, inputInfo, encoder, outputInfo, m_Data.m_Parameters.m_DataLayout);
+}
+
+} //namespace armnn
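
Note: this hunk adds only the .cpp file; the companion RefResizeBilinearWorkload.hpp that it includes is not part of the section shown above. As a rough guide to how the pieces fit together, below is a minimal sketch of what such a header would be expected to declare, assuming the usual armnn pattern of deriving from BaseWorkload with a ResizeBilinearQueueDescriptor; the include paths and base class here are assumptions, not lines taken from this change.

    // RefResizeBilinearWorkload.hpp (sketch, not part of this diff)
    #pragma once

    #include <backendsCommon/Workload.hpp>
    #include <backendsCommon/WorkloadData.hpp>

    namespace armnn
    {

    // One workload class for all supported data types; Execute() selects the
    // appropriate Decoder/Encoder for the tensor's DataType at runtime.
    class RefResizeBilinearWorkload : public BaseWorkload<ResizeBilinearQueueDescriptor>
    {
    public:
        using BaseWorkload<ResizeBilinearQueueDescriptor>::BaseWorkload;
        void Execute() const override;
    };

    } // namespace armnn

Because Execute() obtains its input and output through MakeDecoder<float>/MakeEncoder<float>, the same float-based code path serves both Float32 and quantised Uint8 tensors, which is what allows the previously separate per-type workload files to be removed.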