MLCE-156: Add Division workload in CpuAcc

Change-Id: I1f228fcaf1077867d9755a2b850c6703387fab34
Signed-off-by: Pablo Tello <pablo.tello@arm.com>
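
This change adds a NeonDivisionWorkload backed by arm_compute::NEElementwiseDivision
and registers it with the Neon layer-support checks, the workload factory, the build
files and the unit tests, so Division no longer has to fall back from CpuAcc.

For reference, a minimal sketch of exercising the new path through the public API
(shapes, names and the runtime setup below are illustrative, not part of this patch):

    armnn::INetworkPtr network = armnn::INetwork::Create();
    armnn::IConnectableLayer* in0 = network->AddInputLayer(0);
    armnn::IConnectableLayer* in1 = network->AddInputLayer(1);
    armnn::IConnectableLayer* div = network->AddDivisionLayer("div");
    armnn::IConnectableLayer* out = network->AddOutputLayer(0);

    armnn::TensorInfo info({1, 4}, armnn::DataType::Float32);
    in0->GetOutputSlot(0).SetTensorInfo(info);
    in1->GetOutputSlot(0).SetTensorInfo(info);
    div->GetOutputSlot(0).SetTensorInfo(info);

    in0->GetOutputSlot(0).Connect(div->GetInputSlot(0));
    in1->GetOutputSlot(0).Connect(div->GetInputSlot(1));
    div->GetOutputSlot(0).Connect(out->GetInputSlot(0));

    // Prefer CpuAcc; with this change the division layer is accepted there.
    armnn::IRuntimePtr runtime = armnn::IRuntime::Create(armnn::IRuntime::CreationOptions());
    armnn::IOptimizedNetworkPtr optNet =
        armnn::Optimize(*network, { armnn::Compute::CpuAcc }, runtime->GetDeviceSpec());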
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 47bcc2e..6ca69f4 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -38,6 +38,7 @@
 #include "workloads/NeonConcatWorkload.hpp"
 #include "workloads/NeonMinimumWorkload.hpp"
 #include "workloads/NeonMultiplicationWorkload.hpp"
+#include "workloads/NeonDivisionWorkload.hpp"
 #include "workloads/NeonNormalizationFloatWorkload.hpp"
 #include "workloads/NeonFullyConnectedWorkload.hpp"
 #include "workloads/NeonPadWorkload.hpp"
@@ -554,6 +555,18 @@
                                    output);
 }
 
+bool NeonLayerSupport::IsDivisionSupported(const TensorInfo& input0,
+                                           const TensorInfo& input1,
+                                           const TensorInfo& output,
+                                           Optional<std::string&> reasonIfUnsupported) const
+{
+    FORWARD_WORKLOAD_VALIDATE_FUNC(NeonDivisionWorkloadValidate,
+                                   reasonIfUnsupported,
+                                   input0,
+                                   input1,
+                                   output);
+}
+
 bool NeonLayerSupport::IsNormalizationSupported(const TensorInfo& input,
                                                 const TensorInfo& output,
                                                 const NormalizationDescriptor& descriptor,
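
For reference (existing machinery in this file, not added by this patch):
FORWARD_WORKLOAD_VALIDATE_FUNC forwards to a small helper that converts the
arm_compute::Status returned by the validate function into the bool/reason pair
of the IsXxxSupported interface, roughly:

    template <typename Func, typename... Args>
    bool IsWorkloadSupported(Func&& func, armnn::Optional<std::string&> reasonIfUnsupported, Args&&... args)
    {
        arm_compute::Status aclStatus = func(std::forward<Args>(args)...);
        const bool supported = (aclStatus.error_code() == arm_compute::ErrorCode::OK);
        if (!supported && reasonIfUnsupported)
        {
            // Surface ACL's error description as the unsupported reason.
            reasonIfUnsupported.value() = aclStatus.error_description();
        }
        return supported;
    }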
diff --git a/src/backends/neon/NeonLayerSupport.hpp b/src/backends/neon/NeonLayerSupport.hpp
index e49e78b..f1d87f6 100644
--- a/src/backends/neon/NeonLayerSupport.hpp
+++ b/src/backends/neon/NeonLayerSupport.hpp
@@ -176,6 +176,11 @@
                                    const TensorInfo& output,
                                    Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
 
+    bool IsDivisionSupported(const TensorInfo& input0,
+                             const TensorInfo& input1,
+                             const TensorInfo& output,
+                             Optional<std::string&> reasonIfUnsupported = EmptyOptional()) const override;
+
     bool IsNormalizationSupported(const TensorInfo& input,
                                   const TensorInfo& output,
                                   const NormalizationDescriptor& descriptor,
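
With the override in place, Division support on CpuAcc can be queried directly;
a hedged sketch (tensor shapes and names are illustrative):

    armnn::NeonLayerSupport layerSupport;
    armnn::TensorInfo floatInfo({2, 2}, armnn::DataType::Float32);
    std::string reason;
    bool supported = layerSupport.IsDivisionSupported(floatInfo, floatInfo, floatInfo,
                                                      armnn::Optional<std::string&>(reason));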
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 649cb9f..cb2e88e 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -216,10 +216,10 @@
 std::unique_ptr<armnn::IWorkload> NeonWorkloadFactory::CreateDivision(
     const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info) const
 {
-    return MakeWorkloadHelper<NullWorkload, NullWorkload>(descriptor, info);
+    return std::make_unique<NeonDivisionWorkload>(descriptor, info);
 }
 
-std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor& 
+std::unique_ptr<IWorkload> NeonWorkloadFactory::CreateElementwiseUnary(const ElementwiseUnaryQueueDescriptor&
                                                                        descriptor,
                                                                        const WorkloadInfo& info) const
 {
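
Previously CreateDivision went through MakeWorkloadHelper<NullWorkload, NullWorkload>,
which yields no workload and so forced Division onto another backend; it now constructs
the real Neon workload. The calling pattern, sketched with placeholder handles and
info (not part of this patch):

    // Placeholder sketch: the handles and workloadInfo are assumed to exist
    // and to describe two input tensors and one output tensor.
    armnn::DivisionQueueDescriptor descriptor;
    descriptor.m_Inputs  = { inputHandle0, inputHandle1 }; // two operands
    descriptor.m_Outputs = { outputHandle };               // one quotient
    auto workload = factory.CreateDivision(descriptor, workloadInfo);
    workload->Execute();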
diff --git a/src/backends/neon/backend.mk b/src/backends/neon/backend.mk
index 05dcd02..740cbcd 100644
--- a/src/backends/neon/backend.mk
+++ b/src/backends/neon/backend.mk
@@ -45,6 +45,7 @@
         workloads/NeonMeanWorkload.cpp \
         workloads/NeonMinimumWorkload.cpp \
         workloads/NeonMultiplicationWorkload.cpp \
+        workloads/NeonDivisionWorkload.cpp \
         workloads/NeonNormalizationFloatWorkload.cpp \
         workloads/NeonPadWorkload.cpp \
         workloads/NeonPermuteWorkload.cpp \
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 400a5a3..3e1888c 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -181,6 +181,14 @@
                                       DataType::QAsymmU8>();
 }
 
+BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
+{
+    NeonCreateElementwiseWorkloadTest<NeonDivisionWorkload,
+                                      DivisionQueueDescriptor,
+                                      DivisionLayer,
+                                      DataType::Float32>();
+}
+
 template <typename BatchNormalizationWorkloadType, typename armnn::DataType DataType>
 static void NeonCreateBatchNormalizationWorkloadTest(DataLayout dataLayout)
 {
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index f14b2a4..1b25cad 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -491,6 +491,12 @@
 ARMNN_AUTO_TEST_CASE(SubBroadcastUint8, SubtractionBroadcastUint8Test)
 ARMNN_AUTO_TEST_CASE(SubBroadcast1ElementUint8, SubtractionBroadcast1ElementUint8Test)
 
+// Div
+ARMNN_AUTO_TEST_CASE(SimpleDivision, DivisionTest)
+ARMNN_AUTO_TEST_CASE(DivisionByZero, DivisionByZeroTest)
+ARMNN_AUTO_TEST_CASE(DivisionBroadcast1Element, DivisionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_CASE(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest)
+
 // Mul
 ARMNN_AUTO_TEST_CASE(SimpleMultiplication, MultiplicationTest)
 ARMNN_AUTO_TEST_CASE(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
diff --git a/src/backends/neon/workloads/CMakeLists.txt b/src/backends/neon/workloads/CMakeLists.txt
index d2c549e..46b5332 100644
--- a/src/backends/neon/workloads/CMakeLists.txt
+++ b/src/backends/neon/workloads/CMakeLists.txt
@@ -52,6 +52,8 @@
     NeonMinimumWorkload.hpp
     NeonMultiplicationWorkload.cpp
     NeonMultiplicationWorkload.hpp
+    NeonDivisionWorkload.cpp
+    NeonDivisionWorkload.hpp
     NeonNormalizationFloatWorkload.cpp
     NeonNormalizationFloatWorkload.hpp
     NeonPadWorkload.cpp
diff --git a/src/backends/neon/workloads/NeonDivisionWorkload.cpp b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
new file mode 100644
index 0000000..6fdb455
--- /dev/null
+++ b/src/backends/neon/workloads/NeonDivisionWorkload.cpp
@@ -0,0 +1,45 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonDivisionWorkload.hpp"
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo& input0,
+                                                const TensorInfo& input1,
+                                                const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
+    const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NEElementwiseDivision::validate(&aclInput0,
+                                                        &aclInput1,
+                                                        &aclOutput);
+}
+
+NeonDivisionWorkload::NeonDivisionWorkload(const DivisionQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info)
+    : BaseWorkload<DivisionQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonDivisionWorkload", 2, 1);
+
+    arm_compute::ITensor& input0 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
+    m_DivLayer.configure(&input0, &input1, &output);
+}
+
+void NeonDivisionWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonDivisionWorkload_Execute");
+    m_DivLayer.run();
+}
+
+} //namespace armnn
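
The validate/configure split follows the usual Arm Compute Library pattern:
NeonDivisionWorkloadValidate runs on TensorInfo metadata alone, before any tensors
are allocated, while the constructor configures the function against live ITensors.
A hedged sketch of the check (variable names illustrative):

    arm_compute::Status status = armnn::NeonDivisionWorkloadValidate(input0Info, input1Info, outputInfo);
    if (status.error_code() == arm_compute::ErrorCode::OK)
    {
        // Shapes and data types are acceptable to NEElementwiseDivision;
        // it is safe to construct and run the workload.
    }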
diff --git a/src/backends/neon/workloads/NeonDivisionWorkload.hpp b/src/backends/neon/workloads/NeonDivisionWorkload.hpp
new file mode 100644
index 0000000..2405d9a
--- /dev/null
+++ b/src/backends/neon/workloads/NeonDivisionWorkload.hpp
@@ -0,0 +1,30 @@
+//
+// Copyright © 2020 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#pragma once
+
+#include <neon/workloads/NeonWorkloadUtils.hpp>
+
+#include <arm_compute/runtime/NEON/functions/NEElementwiseOperations.h>
+
+namespace armnn
+{
+
+arm_compute::Status NeonDivisionWorkloadValidate(const TensorInfo& input0,
+                                                const TensorInfo& input1,
+                                                const TensorInfo& output);
+
+class NeonDivisionWorkload : public BaseWorkload<DivisionQueueDescriptor>
+{
+public:
+    NeonDivisionWorkload(const DivisionQueueDescriptor& descriptor, const WorkloadInfo& info);
+
+    virtual void Execute() const override;
+
+private:
+    mutable arm_compute::NEElementwiseDivision m_DivLayer;
+};
+
+} //namespace armnn
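
Note on the mutable member: Execute() is const, as required by the IWorkload
interface, but arm_compute::IFunction::run() is non-const, so m_DivLayer is declared
mutable; the other Neon elementwise workloads use the same pattern.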
diff --git a/src/backends/neon/workloads/NeonWorkloads.hpp b/src/backends/neon/workloads/NeonWorkloads.hpp
index 104504e..39cf044 100644
--- a/src/backends/neon/workloads/NeonWorkloads.hpp
+++ b/src/backends/neon/workloads/NeonWorkloads.hpp
@@ -7,6 +7,7 @@
 #include "NeonAbsWorkload.hpp"
 #include "NeonActivationWorkload.hpp"
 #include "NeonAdditionWorkload.hpp"
+#include "NeonDivisionWorkload.hpp"
 #include "NeonArgMinMaxWorkload.hpp"
 #include "NeonBatchNormalizationWorkload.hpp"
 #include "NeonConstantWorkload.hpp"