IVGCVSW-2043 - Merger using ACL for innermost concat axis
     * Add ClMergerWorkload and NeonMergerWorkload to call ACL when concatenating along the innermost axis
     * Modify layer support to call ClMergerWorkloadValidate and NeonMergerWorkloadValidate when the concat axis is innermost
     * Add m_ConcatAxis to MergerDescriptor
     * Modify MergerQueueDescriptor::Validate to check sub-tensors only when sub-tensors are in use
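
     A minimal sketch of the gating condition, using the descriptor accessors
     added by this change (UseAclConcat is a hypothetical helper, shown for
     illustration only):

         bool UseAclConcat(const armnn::MergerDescriptor& desc)
         {
             // ACL handles the concatenation only when the concat axis is the
             // innermost (fastest-varying) dimension.
             return (desc.GetNumDimensions() - desc.GetConcatAxis()) == 1;
         }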

!android-nn-driver:166

Change-Id: I56676b43964c8d6d726387b41b3cc34a512c0f0a
diff --git a/src/backends/neon/workloads/NeonMergerWorkload.cpp b/src/backends/neon/workloads/NeonMergerWorkload.cpp
new file mode 100644
index 0000000..f82e244
--- /dev/null
+++ b/src/backends/neon/workloads/NeonMergerWorkload.cpp
@@ -0,0 +1,84 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "NeonMergerWorkload.hpp"
+#include <armnn/ArmNN.hpp>
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <neon/NeonTensorHandle.hpp>
+
+
+namespace armnn
+{
+using namespace armcomputetensorutils;
+
+arm_compute::Status NeonMergerWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
+                                               const TensorInfo& output,
+                                               const MergerDescriptor& descriptor)
+{
+    // Convert each ArmNN TensorInfo to its ACL equivalent; this workload
+    // assumes NCHW data layout.
+    std::vector<arm_compute::TensorInfo> aclInputs;
+    for (const TensorInfo* input : inputs)
+    {
+        arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
+        aclInputs.emplace_back(aclInputInfo);
+    }
+    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
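+    // The innermost axis of an NCHW tensor maps to WIDTH in ACL's
+    // DataLayoutDimension terms.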
+    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
+
+    std::vector<arm_compute::ITensorInfo*> aclInputPtrs;
+    for (arm_compute::ITensorInfo& input : aclInputs)
+    {
+        aclInputPtrs.emplace_back(&input);
+    }
+
+    return arm_compute::NEConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
+}
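+
+// Illustrative usage (hypothetical names): layer-support checks can pass the
+// per-input TensorInfos and the descriptor, e.g.
+//     std::vector<const TensorInfo*> inputPtrs = { &input0Info, &input1Info };
+//     arm_compute::Status status = NeonMergerWorkloadValidate(inputPtrs, outputInfo, descriptor);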
+
+NeonMergerWorkload::NeonMergerWorkload(const MergerQueueDescriptor& descriptor,
+                                       const WorkloadInfo& info)
+    : BaseWorkload<MergerQueueDescriptor>(descriptor, info)
+{
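+    // ACL's NEConcatenateLayer is only used when the concat axis is the
+    // innermost dimension; otherwise the merge is expected to have been
+    // handled via sub-tensor views, and Execute() becomes a no-op.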
+    m_Execute = true;
+
+    unsigned int innerAxisOrder = descriptor.m_Parameters.GetNumDimensions() - descriptor.m_Parameters.GetConcatAxis();
+
+    if (innerAxisOrder != 1)
+    {
+        m_Execute = false;
+        return;
+    }
+
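+    // Gather the ACL tensors backing each input handle and force NCHW layout
+    // to match the validate path.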
+    std::vector<arm_compute::ITensor *> aclInputs;
+    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
+    for (auto input : m_Data.m_Inputs)
+    {
+        arm_compute::ITensor& aclInput = boost::polymorphic_pointer_downcast<INeonTensorHandle>(input)->GetTensor();
+        aclInput.info()->set_data_layout(aclDataLayout);
+        aclInputs.emplace_back(&aclInput);
+    }
+    arm_compute::ITensor& output =
+        boost::polymorphic_pointer_downcast<INeonTensorHandle>(m_Data.m_Outputs[0])->GetTensor();
+    output.info()->set_data_layout(aclDataLayout);
+
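+    // As in NeonMergerWorkloadValidate, the innermost NCHW axis maps to WIDTH.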
+    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
+
+    m_Layer.configure(aclInputs, &output, aclAxis);
+
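+    // Prepare once at construction so Execute() only needs to run the layer.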
+    m_Layer.prepare();
+}
+
+void NeonMergerWorkload::Execute() const
+{
+    if (m_Execute)
+    {
+        ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMergerWorkload_Execute");
+        m_Layer.run();
+    }
+}
+
+} // namespace armnn
+