IVGCVSW-1048 RESIZE_BILINEAR NEON operator

 * Implemented NeonResizeBilinearWorkload (backend wiring sketched below)
 * Enabled ResizeBilinear operator unit tests for Neon
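
Note: this patch adds only the workload itself; the NeonWorkloadFactory /
NeonLayerSupport wiring and the unit test enabling are handled separately and
are not part of this diff. The following is a rough, hypothetical sketch of how
the new workload is typically hooked up (free-function form, function names
invented for illustration only):

    #include <neon/workloads/NeonResizeBilinearWorkload.hpp>
    #include <arm_compute/core/Error.h>
    #include <memory>

    // Ask ACL whether NEScale can handle these tensors
    // (NeonResizeBilinearWorkloadValidate wraps NEScale::validate).
    bool IsResizeBilinearSupportedOnNeon(const armnn::TensorInfo& input,
                                         const armnn::TensorInfo& output)
    {
        return armnn::NeonResizeBilinearWorkloadValidate(input, output).error_code()
               == arm_compute::ErrorCode::OK;
    }

    // Construct the workload from the queue descriptor, as a workload factory would.
    std::unique_ptr<armnn::IWorkload> CreateResizeBilinearOnNeon(
        const armnn::ResizeBilinearQueueDescriptor& descriptor,
        const armnn::WorkloadInfo& info)
    {
        return std::make_unique<armnn::NeonResizeBilinearWorkload>(descriptor, info);
    }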

!android-nn-driver:405

Change-Id: Iec3100ccaf7d246e8eaf683d1f3ec9191df5241e
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index 919c716..713418d 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -46,6 +46,8 @@
     NeonPooling2dWorkload.hpp
     NeonReshapeWorkload.cpp
     NeonReshapeWorkload.hpp
+    NeonResizeBilinearWorkload.cpp
+    NeonResizeBilinearWorkload.hpp
     NeonSoftmaxBaseWorkload.cpp
     NeonSoftmaxBaseWorkload.hpp
     NeonSoftmaxFloatWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp b/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp
new file mode 100644
index 0000000..37f97bf
--- /dev/null
+++ b/src/backends/neon/workloads/NeonResizeBilinearWorkload.cpp
@@ -0,0 +1,59 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonResizeBilinearWorkload.hpp"
+
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <neon/NeonTensorHandle.hpp>
+#include <neon/NeonLayerSupport.hpp>
+
+using namespace armnn::armcomputetensorutils;
+
+namespace armnn
+{
+
+arm_compute::Status NeonResizeBilinearWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInputInfo  = armcomputetensorutils::BuildArmComputeTensorInfo(input);
+    const arm_compute::TensorInfo aclOutputInfo = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NEScale::validate(&aclInputInfo,
+                                          &aclOutputInfo,
+                                          arm_compute::InterpolationPolicy::BILINEAR,
+                                          arm_compute::BorderMode::REPLICATE,
+                                          arm_compute::PixelValue(0.f),
+                                          arm_compute::SamplingPolicy::TOP_LEFT);
+}
+
+NeonResizeBilinearWorkload::NeonResizeBilinearWorkload(const ResizeBilinearQueueDescriptor& descriptor,
+                                                       const WorkloadInfo& info)
+    : BaseWorkload<ResizeBilinearQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonResizeBilinearWorkload", 1, 1);
+
+    arm_compute::ITensor& input = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout);
+    input.info()->set_data_layout(aclDataLayout);
+    output.info()->set_data_layout(aclDataLayout);
+
+    m_ResizeBilinearLayer.configure(&input,
+                                    &output,
+                                    arm_compute::InterpolationPolicy::BILINEAR,
+                                    arm_compute::BorderMode::REPLICATE,
+                                    arm_compute::PixelValue(0.f),
+                                    arm_compute::SamplingPolicy::TOP_LEFT);
+}
+
+void NeonResizeBilinearWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonResizeBilinearWorkload_Execute");
+    m_ResizeBilinearLayer.run();
+}
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp b/src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp
new file mode 100644
index 0000000..21753b3
--- /dev/null
+++ b/src/backends/neon/workloads/NeonResizeBilinearWorkload.hpp
@@ -0,0 +1,29 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <backendsCommon/Workload.hpp>
+
+#include <neon/workloads/NeonWorkloadUtils.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEScale.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonResizeBilinearWorkloadValidate(const TensorInfo& input, const TensorInfo& output);
+
+class NeonResizeBilinearWorkload : public BaseWorkload<ResizeBilinearQueueDescriptor>
+{
+public:
+    NeonResizeBilinearWorkload(const ResizeBilinearQueueDescriptor& descriptor, const WorkloadInfo& info);
+    void Execute() const override;
+
+private:
+    mutable arm_compute::NEScale m_ResizeBilinearLayer;
+};
+
+} //namespace armnn
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 70f9e37..e034cc9 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -25,6 +25,7 @@
 #include "NeonPermuteWorkload.hpp"
 #include "NeonPooling2dWorkload.hpp"
 #include "NeonReshapeWorkload.hpp"
+#include "NeonResizeBilinearWorkload.hpp"
 #include "NeonSoftmaxFloatWorkload.hpp"
 #include "NeonSoftmaxUint8Workload.hpp"
 #include "NeonSplitterWorkload.hpp"