MLCE-79 NEON QASYMM8 Addition Support

Unit tests have not been added yet, as they require Compute Library a84faffd.
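
QASYMM8 addition is configured with arm_compute::ConvertPolicy::SATURATE, so
quantized results outside the uint8 range clamp to [0, 255] instead of
wrapping. A rough per-element sketch of that behaviour, illustrative only
(the real NEON kernel is vectorised; this sketch assumes all three tensors
share one scale and offset):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Dequantize both operands, add in real values, requantize, saturate.
    uint8_t AddQAsymm8(uint8_t a, uint8_t b, float scale, int32_t offset)
    {
        const float real = scale * (static_cast<int32_t>(a) - offset)
                         + scale * (static_cast<int32_t>(b) - offset);
        const int32_t q = static_cast<int32_t>(std::lround(real / scale)) + offset;
        // ConvertPolicy::SATURATE: clamp on overflow rather than wrap.
        return static_cast<uint8_t>(std::min(255, std::max(0, q)));
    }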

Change-Id: Ica16df493e8d6a76da9d1f74bf43b8403f9dff62
diff --git a/src/backends/neon/workloads/NeonAdditionWorkload.cpp b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
new file mode 100644
index 0000000..70a3909
--- /dev/null
+++ b/src/backends/neon/workloads/NeonAdditionWorkload.cpp
@@ -0,0 +1,49 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonAdditionWorkload.hpp"
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <boost/cast.hpp>
+
+namespace armnn
+{
+
+arm_compute::Status NeonAdditionWorkloadValidate(const TensorInfo& input0,
+                                                 const TensorInfo& input1,
+                                                 const TensorInfo& output)
+{
+    const arm_compute::TensorInfo aclInput0 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
+    const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
+    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);
+
+    return arm_compute::NEArithmeticAddition::validate(&aclInput0,
+                                                       &aclInput1,
+                                                       &aclOutput,
+                                                       arm_compute::ConvertPolicy::SATURATE);
+}
+
+NeonAdditionWorkload::NeonAdditionWorkload(const AdditionQueueDescriptor& descriptor,
+                                           const WorkloadInfo& info)
+    : BaseWorkload<AdditionQueueDescriptor>(descriptor, info)
+{
+    m_Data.ValidateInputsOutputs("NeonAdditionWorkload", 2, 1);
+
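+    // Get the Compute Library tensors backing ArmNN's tensor handles.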
+    arm_compute::ITensor& input0 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
+    arm_compute::ITensor& input1 = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
+    arm_compute::ITensor& output = boost::polymorphic_downcast<INeonTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
+
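+    // Configure the Compute Library addition to saturate on overflow.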
+    m_AddLayer.configure(&input0, &input1, &output, arm_compute::ConvertPolicy::SATURATE);
+}
+
+void NeonAdditionWorkload::Execute() const
+{
+    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonAdditionWorkload_Execute");
+    m_AddLayer.run();
+}
+
+} // namespace armnn
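
For reference, a minimal caller-side sketch of checking QASYMM8 support with
the new validate function before creating the workload. The shapes and
quantisation parameters are made up, the include path is assumed, and
DataType::QuantisedAsymm8 is the quantised-uint8 enum name in this release:

    #include <armnn/Tensor.hpp>

    #include "NeonAdditionWorkload.hpp" // header added by this patch; path assumed

    bool IsQAsymm8AdditionSupported()
    {
        using namespace armnn;

        // Hypothetical shapes and quantisation parameters (scale, offset).
        TensorInfo input0({1, 2, 2, 3}, DataType::QuantisedAsymm8, 0.25f, 128);
        TensorInfo input1({1, 2, 2, 3}, DataType::QuantisedAsymm8, 0.25f, 128);
        TensorInfo output({1, 2, 2, 3}, DataType::QuantisedAsymm8, 0.50f, 128);

        // Thin wrapper over arm_compute::NEArithmeticAddition::validate.
        const arm_compute::Status status = NeonAdditionWorkloadValidate(input0, input1, output);
        return status.error_code() == arm_compute::ErrorCode::OK;
    }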