IVGCVSW-2771 Fix SubTensor error in vgg16 ExecuteNetwork CL

 * Add a check for when sub-tensors cannot be used and, in that case,
   call the ACL function
 * Add ClSplitterWorkload functions
 * Modify IsSplitterSupported to call the ACL validate function
   if sub-tensors cannot be used (see the example below)
 * Also check that quantization parameters match when using sub-tensors
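
For example (illustrative only, not a case taken from this change):
splitting a 4-D input of shape [1, 8, 8, 6] into two views of shape
[1, 8, 8, 3] gives ComputeSplitAxis(...) == {3}, i.e. the last dimension,
so IsSplitterSupported now forwards to ClSplitterWorkloadValidate instead
of assuming sub-tensors can be used; the workload maps armnn axis 3 to
ACL axis (4 - 3) - 1 == 0 and runs arm_compute::CLSplit.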

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I5dfd0e422b7d485dd4421a664add83d870bec5d6
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index 21d191a..dfac289 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -15,6 +15,7 @@
 #include <boost/core/ignore_unused.hpp>
 
 #if defined(ARMCOMPUTECL_ENABLED)
+#include <aclCommon/ArmComputeUtils.hpp>
 #include "workloads/ClAdditionWorkload.hpp"
 #include "workloads/ClActivationWorkload.hpp"
 #include "workloads/ClBatchNormalizationFloatWorkload.hpp"
@@ -39,6 +40,7 @@
 #include "workloads/ClPooling2dWorkload.hpp"
 #include "workloads/ClSoftmaxBaseWorkload.hpp"
 #include "workloads/ClSpaceToBatchNdWorkload.hpp"
+#include "workloads/ClSplitterWorkload.hpp"
 #include "workloads/ClStridedSliceWorkload.hpp"
 #include "workloads/ClSubtractionWorkload.hpp"
 #endif
@@ -612,12 +614,31 @@
                                          const ViewsDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    ignore_unused(descriptor);
-    ignore_unused(outputs);
-    return IsSupportedForDataTypeCl(reasonIfUnsupported,
-                                    input.GetDataType(),
-                                    &TrueFunc<>,
-                                    &TrueFunc<>);
+#if defined(ARMCOMPUTECL_ENABLED)
+    // A split along the last dimension of an input with more than two
+    // dimensions cannot use sub-tensors, because the width and height of
+    // each sub-tensor would not match the width and height of the parent
+    // tensor.
+    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor, input.GetShape());
+    if (descriptor.GetNumDimensions() > 2 && splitAxis.size() == 1 &&
+        *splitAxis.begin() == descriptor.GetNumDimensions() - 1)
+    {
+        FORWARD_WORKLOAD_VALIDATE_FUNC(ClSplitterWorkloadValidate,
+                                       reasonIfUnsupported,
+                                       input,
+                                       outputs,
+                                       *splitAxis.begin());
+    }
+#endif
+    for (auto output : outputs)
+    {
+        if (!input.IsTypeSpaceMatch(output)) // Sub-tensors need matching data types and quantization parameters
+        {
+            SetValueChecked(reasonIfUnsupported, "Cl Splitter: Types and quantization parameters must match.");
+            return false;
+        }
+    }
+    return true;
 }
 
 bool ClLayerSupport::IsStridedSliceSupported(const TensorInfo& input,
diff --git a/src/backends/cl/backend.mk b/src/backends/cl/backend.mk
index 2dc4d9c..7539f3e 100644
--- a/src/backends/cl/backend.mk
+++ b/src/backends/cl/backend.mk
@@ -50,6 +50,7 @@
         workloads/ClSoftmaxFloatWorkload.cpp \
         workloads/ClSoftmaxUint8Workload.cpp \
         workloads/ClSpaceToBatchNdWorkload.cpp \
+        workloads/ClSplitterWorkload.cpp \
         workloads/ClStridedSliceWorkload.cpp \
         workloads/ClSubtractionWorkload.cpp
 else
diff --git a/src/backends/cl/workloads/CMakeLists.txt b/src/backends/cl/workloads/CMakeLists.txt
index abbed0e..a3eedd0 100644
--- a/src/backends/cl/workloads/CMakeLists.txt
+++ b/src/backends/cl/workloads/CMakeLists.txt
@@ -64,6 +64,7 @@
     ClSoftmaxUint8Workload.hpp
     ClSpaceToBatchNdWorkload.hpp
     ClSpaceToBatchNdWorkload.cpp
+    ClSplitterWorkload.cpp
     ClSplitterWorkload.hpp
     ClStridedSliceWorkload.cpp
     ClStridedSliceWorkload.hpp
diff --git a/src/backends/cl/workloads/ClSplitterWorkload.cpp b/src/backends/cl/workloads/ClSplitterWorkload.cpp
new file mode 100644
index 0000000..9bbbcab
--- /dev/null
+++ b/src/backends/cl/workloads/ClSplitterWorkload.cpp
@@ -0,0 +1,112 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "ClSplitterWorkload.hpp"
+
+#include "ClWorkloadUtils.hpp"
+
+#include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <aclCommon/ArmComputeUtils.hpp>
+#include <backendsCommon/CpuTensorHandle.hpp>
+#include <cl/ClTensorHandle.hpp>
+
+
+namespace armnn
+{
+
+using namespace armcomputetensorutils;
+
+namespace
+{
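+    // Convert an armnn split axis (counted from the outermost dimension) to
+    // the ACL axis; ACL numbers dimensions from the innermost one outwards.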
+    unsigned int CalcAclAxis(unsigned int numDimensions, unsigned int splitAxis)
+    {
+        return (numDimensions - splitAxis) - 1;
+    }
+
+} //namespace
+
+arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo& input,
+                                               const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                                               unsigned int splitAxis)
+{
+    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
+
+    size_t numOutputs = outputs.size();
+
+    std::vector<arm_compute::TensorInfo> aclOutputs;
+    aclOutputs.reserve(numOutputs);
+
+    std::vector<arm_compute::ITensorInfo*> aclOutputPtr;
+    aclOutputPtr.reserve(numOutputs);
+
+    for (size_t i = 0u; i < outputs.size(); ++i)
+    {
+        aclOutputs.emplace_back(BuildArmComputeTensorInfo(outputs[i]));
+        aclOutputPtr.emplace_back(&aclOutputs.back());
+    }
+
+    unsigned int aclAxis = CalcAclAxis(input.GetNumDimensions(), splitAxis);
+    return arm_compute::CLSplit::validate(&aclInputInfo, aclOutputPtr, aclAxis);
+}
+
+ClSplitterWorkload::ClSplitterWorkload(const SplitterQueueDescriptor& descriptor, const WorkloadInfo& info)
+        : BaseWorkload<SplitterQueueDescriptor>(descriptor, info)
+{
+    bool allOutputsAreSubtensors = true;
+
+    // Check whether all outputs are sub-tensors
+    for (auto output : m_Data.m_Outputs)
+    {
+        if (output && !output->GetParent())
+        {
+            // A non sub-tensor output was found, so we need to execute the split function
+            allOutputsAreSubtensors = false;
+            break;
+        }
+    }
+
+    if (allOutputsAreSubtensors)
+    {
+        // Can skip configuring the split function since it's not executed
+        return;
+    }
+
+    arm_compute::ICLTensor& input = boost::polymorphic_pointer_downcast<IClTensorHandle>(
+            m_Data.m_Inputs[0])->GetTensor();
+
+    std::vector<arm_compute::ICLTensor *> aclOutputs;
+    for (auto output : m_Data.m_Outputs)
+    {
+        arm_compute::ICLTensor& aclOutput = boost::polymorphic_pointer_downcast<IClTensorHandle>(output)->GetTensor();
+        aclOutputs.emplace_back(&aclOutput);
+    }
+
+    // Create the layer function
+    m_Layer.reset(new arm_compute::CLSplit());
+
+    // Configure input and output tensors
+    std::set<unsigned int> splitAxis = ComputeSplitAxis(descriptor.m_Parameters, m_Data.m_Inputs[0]->GetShape());
+    if (splitAxis.size() != 1)
+    {
+        throw InvalidArgumentException("Cannot derive split axis from SplitterDescriptor");
+    }
+
+    unsigned int aclAxis = CalcAclAxis(descriptor.m_Parameters.GetNumDimensions(), *splitAxis.begin());
+    m_Layer->configure(&input, aclOutputs, aclAxis);
+
+    // Prepare
+    m_Layer->prepare();
+}
+
+void ClSplitterWorkload::Execute() const
+{
+    if (m_Layer)
+    {
+        ARMNN_SCOPED_PROFILING_EVENT_CL("ClSplitterWorkload_Execute");
+        m_Layer->run();
+    }
+}
+
+} //namespace armnn
diff --git a/src/backends/cl/workloads/ClSplitterWorkload.hpp b/src/backends/cl/workloads/ClSplitterWorkload.hpp
index 9503359..d024452 100644
--- a/src/backends/cl/workloads/ClSplitterWorkload.hpp
+++ b/src/backends/cl/workloads/ClSplitterWorkload.hpp
@@ -7,19 +7,26 @@
 
 #include <backendsCommon/Workload.hpp>
 
+#include <arm_compute/runtime/CL/CLFunctions.h>
+
+#include <functional>
+
 namespace armnn
 {
 
-// Base class template providing an implementation of the Splitter layer common to all data types.
+arm_compute::Status ClSplitterWorkloadValidate(const TensorInfo& input,
+                                               const std::vector<std::reference_wrapper<TensorInfo>>& outputs,
+                                               unsigned int splitAxis);
+
 class ClSplitterWorkload : public BaseWorkload<SplitterQueueDescriptor>
 {
 public:
-    using BaseWorkload<SplitterQueueDescriptor>::BaseWorkload;
+    ClSplitterWorkload(const SplitterQueueDescriptor& descriptor, const WorkloadInfo& info);
 
-    void Execute() const override
-    {
-        // With subtensors, splitter is a no-op.
-    }
+    void Execute() const override;
+
+private:
+    mutable std::unique_ptr<arm_compute::CLSplit> m_Layer;
 };
 
 } //namespace armnn