IVGCVSW-2957 MergerLayer subtensor optimization now backend agnostic

+ Update clframework pin
+ Cl and Neon Merger workloads updated to use the memory-layout-agnostic
  concat-axis API (see the axis-mapping sketch below)
+ Workloads only use the sub-tensor optimization if ALL input tensors are sub-tensors
+ Refactor LayerSupportCommon code to be more succinct
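
A minimal sketch of that axis mapping, for reference; the free-standing
signature is illustrative only, since the in-tree CalcAxis helpers added in
the workloads below are file-local and take the MergerDescriptor directly:

    #include <cstddef>

    // ArmNN numbers the concat axis from the outermost dimension while ACL
    // numbers dimensions from the innermost, so the index is inverted rather
    // than routed through a hard-coded DataLayoutDimension.
    std::size_t CalcAxis(unsigned int numDimensions, unsigned int concatAxis)
    {
        // e.g. a 4D NCHW tensor concatenated on C (ArmNN axis 1) maps to
        // ACL axis (4 - 1) - 1 = 2
        return (numDimensions - concatAxis) - 1;
    }

This replaces the hard-coded DataLayoutDimension::WIDTH axis and the forced
NCHW data layout used by the previous workload code.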

Change-Id: Ib61ad4ccbd767e924dff07e61022e0cda4069828
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
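
The LayerSupportCommon refactor relies on the SetValueChecked helper used in
the hunks below; a rough, self-contained sketch of that pattern follows. A raw
pointer stands in for armnn::Optional<std::string&> here, so the signature is
an approximation for illustration, not a copy of the real helper:

    #include <string>
    #include <utility>

    // Assign a failure reason only when the caller actually asked for one,
    // replacing the repeated "if (reasonIfUnsupported) { ... }" blocks.
    template <typename T, typename V>
    void SetValueChecked(T* optionalRef, V&& val)
    {
        if (optionalRef)
        {
            *optionalRef = std::forward<V>(val);
        }
    }

    // Usage mirroring the layer-support code:
    //     std::string reason;
    //     SetValueChecked(&reason, "Cl Merger: Maximum of 4 dimensions supported.");
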
diff --git a/src/backends/cl/ClLayerSupport.cpp b/src/backends/cl/ClLayerSupport.cpp
index cfc0f11..a5c5f2b 100644
--- a/src/backends/cl/ClLayerSupport.cpp
+++ b/src/backends/cl/ClLayerSupport.cpp
@@ -416,7 +416,14 @@
                                        const OriginsDescriptor& descriptor,
                                        Optional<std::string&> reasonIfUnsupported) const
 {
-    if(descriptor.GetNumDimensions() - descriptor.GetConcatAxis() == 1)
+    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+    {
+        SetValueChecked(reasonIfUnsupported, "Cl Merger: Concat axis > Number of dimensions.");
+        return false;
+    }
+
+    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+    if (concatInnerAxis < 3) // Width, height, or channels
     {
         FORWARD_WORKLOAD_VALIDATE_FUNC(ClMergerWorkloadValidate,
                                        reasonIfUnsupported,
@@ -424,12 +431,24 @@
                                        output,
                                        descriptor);
     }
-    else
+    else if (concatInnerAxis == 3)
     {
-        return IsSupportedForDataTypeCl(reasonIfUnsupported,
-                                        inputs[0]->GetDataType(),
-                                        &TrueFunc<>,
-                                        &TrueFunc<>);
+        // We rely on the sub-tensor optimization to handle the batch dimension for 4D tensors. If we can't use
+        // sub-tensors for this, then we can't support it. Here we check that the sub-tensors will work.
+        for (auto& input : inputs)
+        {
+            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the type spaces do not match
+            {
+                SetValueChecked(reasonIfUnsupported, "Cl Merger: Types and quantization parameters must match.");
+                return false;
+            }
+        }
+        return true; // Sub-tensors support concat along batch
+    }
+    else // > 4 dimensions not supported.
+    {
+        SetValueChecked(reasonIfUnsupported, "Cl Merger: Maximum of 4 dimensions supported.");
+        return false;
     }
 }
 
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index d41a7e5..e4097a1 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -113,6 +113,12 @@
         coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
     }
 
+    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
+    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
+    {
+        return nullptr;
+    }
+
     return std::make_unique<ClSubTensorHandle>(
         boost::polymorphic_downcast<IClTensorHandle*>(&parent), shape, coords);
 }
diff --git a/src/backends/cl/workloads/ClMergerWorkload.cpp b/src/backends/cl/workloads/ClMergerWorkload.cpp
index e06d8c5..610acb9 100644
--- a/src/backends/cl/workloads/ClMergerWorkload.cpp
+++ b/src/backends/cl/workloads/ClMergerWorkload.cpp
@@ -9,16 +9,25 @@
 #include <cl/ClTensorHandle.hpp>
 #include <cl/ClLayerSupport.hpp>
 
+#include <arm_compute/core/Types.h>
+
 #include <boost/polymorphic_pointer_cast.hpp>
 
 namespace armnn
 {
 using namespace armcomputetensorutils;
 
+namespace
+{
+size_t CalcAxis(const MergerDescriptor& desc)
+{
+    return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
+}
+} //namespace
+
 arm_compute::Status ClMergerWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                              const TensorInfo& output,
                                              const MergerDescriptor& descriptor)
-
 {
     std::vector<arm_compute::TensorInfo> aclInputs;
     for (const TensorInfo* input : inputs)
@@ -27,59 +36,65 @@
         aclInputs.emplace_back(aclInputInfo);
     }
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
-
     std::vector<arm_compute::ITensorInfo*> aclInputPtrs;
     for (arm_compute::ITensorInfo& input : aclInputs)
     {
         aclInputPtrs.emplace_back(&input);
     }
 
+    size_t aclAxis = CalcAxis(descriptor);
     return arm_compute::CLConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
-
 }
 
 ClMergerWorkload::ClMergerWorkload(const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
 : BaseWorkload<MergerQueueDescriptor>(descriptor, info)
 {
-    m_Execute = true;
+    bool allInputsAreSubtensors = true;
 
-    unsigned int innerAxisOrder = descriptor.m_Parameters.GetNumDimensions() - descriptor.m_Parameters.GetConcatAxis();
-
-    if (innerAxisOrder != 1)
+    // Check that all inputs are sub-tensors
+    for (auto input : descriptor.m_Inputs)
     {
-        m_Execute = false;
+        if (!input->GetParent())
+        {
+            // Non-sub-tensor input found, so we need to execute the merger function
+            allInputsAreSubtensors = false;
+            break;
+        }
+    }
+
+    if (allInputsAreSubtensors)
+    {
+        // Can skip configuring the merger function since it's not executed
         return;
     }
 
     std::vector<arm_compute::ICLTensor *> aclInputs;
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
     for (auto input : m_Data.m_Inputs)
     {
         arm_compute::ICLTensor& aclInput  = boost::polymorphic_pointer_downcast<IClTensorHandle>(input)->GetTensor();
-        aclInput.info()->set_data_layout(aclDataLayout);
         aclInputs.emplace_back(&aclInput);
     }
     arm_compute::ICLTensor& output = boost::polymorphic_pointer_downcast<IClTensorHandle>(
                                                                          m_Data.m_Outputs[0])->GetTensor();
-    output.info()->set_data_layout(aclDataLayout);
 
-    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
+    // Create the layer function
+    m_Layer.reset(new arm_compute::CLConcatenateLayer());
 
-    m_Layer.configure(aclInputs, &output, aclAxis);
+    // Configure input and output tensors
+    size_t aclAxis = CalcAxis(descriptor.m_Parameters);
+    m_Layer->configure(aclInputs, &output, aclAxis);
 
-    m_Layer.prepare();
-
+    // Prepare
+    m_Layer->prepare();
 }
 
 void ClMergerWorkload::Execute() const
 {
-    if (m_Execute)
+    if (m_Layer)
     {
         ARMNN_SCOPED_PROFILING_EVENT_CL("ClMergerWorkload_Execute");
-        m_Layer.run();
+        m_Layer->run();
     }
-
 }
 
 } //namespace armnn
\ No newline at end of file
diff --git a/src/backends/cl/workloads/ClMergerWorkload.hpp b/src/backends/cl/workloads/ClMergerWorkload.hpp
index 8189a1b..1c2f823 100644
--- a/src/backends/cl/workloads/ClMergerWorkload.hpp
+++ b/src/backends/cl/workloads/ClMergerWorkload.hpp
@@ -24,8 +24,7 @@
     void Execute() const override;
 
 private:
-    mutable arm_compute::CLConcatenateLayer m_Layer;
-    bool m_Execute;
+    mutable std::unique_ptr<arm_compute::CLConcatenateLayer> m_Layer;
 };
 
 } //namespace armnn
diff --git a/src/backends/neon/NeonLayerSupport.cpp b/src/backends/neon/NeonLayerSupport.cpp
index 46a7e6f..898660c 100644
--- a/src/backends/neon/NeonLayerSupport.cpp
+++ b/src/backends/neon/NeonLayerSupport.cpp
@@ -52,10 +52,7 @@
 #if defined(ARMCOMPUTENEON_ENABLED)
     return true;
 #else
-    if (reasonIfUnsupported)
-    {
-        reasonIfUnsupported.value() = "The armnn library has been built without NEON support";
-    }
+    SetValueChecked(reasonIfUnsupported, "The armnn library has been built without NEON support");
     return false;
 #endif
 }
@@ -304,7 +301,14 @@
                                          const OriginsDescriptor& descriptor,
                                          Optional<std::string&> reasonIfUnsupported) const
 {
-    if(descriptor.GetNumDimensions() - descriptor.GetConcatAxis() == 1)
+    if (descriptor.GetNumDimensions() <= descriptor.GetConcatAxis())
+    {
+        SetValueChecked(reasonIfUnsupported, "Neon Merger: Concat axis > Number of dimensions.");
+        return false;
+    }
+
+    unsigned int concatInnerAxis = (descriptor.GetNumDimensions() - descriptor.GetConcatAxis()) - 1;
+    if (concatInnerAxis < 3) // Width, height, or channels
     {
         FORWARD_WORKLOAD_VALIDATE_FUNC(NeonMergerWorkloadValidate,
                                        reasonIfUnsupported,
@@ -312,13 +316,23 @@
                                        output,
                                        descriptor);
     }
-    else
-     {
-         return IsSupportedForDataTypeNeon(reasonIfUnsupported,
-                                           inputs[0]->GetDataType(),
-                                           &TrueFunc<>,
-                                           &TrueFunc<>);
-      }
+    else if (concatInnerAxis == 3)
+    {
+        for (auto& input : inputs)
+        {
+            if (input && !output.IsTypeSpaceMatch(*input)) // Cannot use sub-tensors if the type spaces do not match
+            {
+                SetValueChecked(reasonIfUnsupported, "Neon Merger: Types and quantization parameters must match.");
+                return false;
+            }
+        }
+        return true; // Sub-tensors support concat along batch
+    }
+    else // > 4 dimensions not supported.
+    {
+        SetValueChecked(reasonIfUnsupported, "Neon Merger: Maximum of 4 dimensions supported.");
+        return false;
+    }
 }
 
 bool NeonLayerSupport::IsMinimumSupported(const TensorInfo& input0,
diff --git a/src/backends/neon/NeonWorkloadFactory.cpp b/src/backends/neon/NeonWorkloadFactory.cpp
index 101e59d..8db5f9a 100644
--- a/src/backends/neon/NeonWorkloadFactory.cpp
+++ b/src/backends/neon/NeonWorkloadFactory.cpp
@@ -61,6 +61,12 @@
         coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
     }
 
+    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
+    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
+    {
+        return nullptr;
+    }
+
     return std::make_unique<NeonSubTensorHandle>(
         boost::polymorphic_downcast<INeonTensorHandle*>(&parent), shape, coords);
 }
diff --git a/src/backends/neon/workloads/NeonMergerWorkload.cpp b/src/backends/neon/workloads/NeonMergerWorkload.cpp
index be096b4..64d4d93 100644
--- a/src/backends/neon/workloads/NeonMergerWorkload.cpp
+++ b/src/backends/neon/workloads/NeonMergerWorkload.cpp
@@ -11,12 +11,20 @@
 #include <backendsCommon/CpuTensorHandle.hpp>
 #include <neon/NeonTensorHandle.hpp>
 
-#include <arm_compute/runtime/NEON/functions/NEConcatenateLayer.h>
+
 
 namespace armnn
 {
 using namespace armcomputetensorutils;
 
+namespace
+{
+size_t CalcAxis(const armnn::MergerDescriptor& desc)
+{
+    return (desc.GetNumDimensions() - desc.GetConcatAxis()) - 1;
+}
+} //namespace
+
 arm_compute::Status NeonMergerWorkloadValidate(const std::vector<const TensorInfo*>& inputs,
                                                const TensorInfo& output,
                                                const MergerDescriptor& descriptor)
@@ -25,60 +33,66 @@
     std::vector<arm_compute::TensorInfo> aclInputs;
     for (const TensorInfo* input : inputs)
     {
-       arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
-       aclInputs.emplace_back(aclInputInfo);
+        arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(*input, armnn::DataLayout::NCHW);
+        aclInputs.emplace_back(aclInputInfo);
     }
     const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);
-    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
-
     std::vector<arm_compute::ITensorInfo*> aclInputPtrs;
     for (arm_compute::ITensorInfo& input : aclInputs)
     {
         aclInputPtrs.emplace_back(&input);
     }
 
+    size_t aclAxis = CalcAxis(descriptor);
     return arm_compute::NEConcatenateLayer::validate(aclInputPtrs, &aclOutputInfo, aclAxis);
-
 }
 
 NeonMergerWorkload::NeonMergerWorkload(
 const MergerQueueDescriptor& descriptor, const WorkloadInfo& info)
         : BaseWorkload<MergerQueueDescriptor>(descriptor, info)
 {
-    m_Execute = true;
+    bool allInputsAreSubtensors = true;
 
-    unsigned int innerAxisOrder = descriptor.m_Parameters.GetNumDimensions() - descriptor.m_Parameters.GetConcatAxis();
-
-    if (innerAxisOrder != 1)
+    // Check that all inputs are sub-tensors
+    for (auto input : descriptor.m_Inputs)
     {
-        m_Execute = false;
+        if (!input->GetParent())
+        {
+            // Non-sub-tensor input found, so we need to execute the merger function
+            allInputsAreSubtensors = false;
+            break;
+        }
+    }
+
+    if (allInputsAreSubtensors)
+    {
+        // Can skip configuring the merger function since it's not executed
         return;
     }
 
     std::vector<arm_compute::ITensor *> aclInputs;
-    arm_compute::DataLayout aclDataLayout = ConvertDataLayout(armnn::DataLayout::NCHW);
     for (auto input : m_Data.m_Inputs)
     {
         arm_compute::ITensor& aclInput  = boost::polymorphic_pointer_downcast<INeonTensorHandle>(input)->GetTensor();
-        aclInput.info()->set_data_layout(aclDataLayout);
         aclInputs.emplace_back(&aclInput);
     }
     arm_compute::ITensor& output = boost::polymorphic_pointer_downcast<INeonTensorHandle>(
-                                                                       m_Data.m_Outputs[0])->GetTensor();
-    output.info()->set_data_layout(aclDataLayout);
+        m_Data.m_Outputs[0])->GetTensor();
 
-    arm_compute::DataLayoutDimension aclAxis = arm_compute::DataLayoutDimension::WIDTH;
+    // Create the layer function
+    m_Layer.reset(new arm_compute::NEConcatenateLayer());
 
-    auto layer = std::make_unique<arm_compute::NEConcatenateLayer>();
-    layer->configure(aclInputs, &output, aclAxis);
-    m_Layer.reset(layer.release());
+    // Configure input and output tensors
+    size_t aclAxis = CalcAxis(descriptor.m_Parameters);
+    m_Layer->configure(aclInputs, &output, aclAxis);
 
+    // Prepare
     m_Layer->prepare();
 }
 
 void NeonMergerWorkload::Execute() const
 {
-    if (m_Execute)
+    if (m_Layer)
     {
         ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonMergerWorkload_Execute");
         m_Layer->run();
diff --git a/src/backends/neon/workloads/NeonMergerWorkload.hpp b/src/backends/neon/workloads/NeonMergerWorkload.hpp
index 3432c62..1dd9309 100644
--- a/src/backends/neon/workloads/NeonMergerWorkload.hpp
+++ b/src/backends/neon/workloads/NeonMergerWorkload.hpp
@@ -9,7 +9,8 @@
 
 #include <arm_compute/core/Error.h>
 #include <arm_compute/runtime/IFunction.h>
-#
+#include <arm_compute/runtime/NEON/functions/NEConcatenateLayer.h>
+
 #include <memory>
 
 namespace armnn
@@ -27,9 +28,7 @@
     void Execute() const override;
 
 private:
-    std::unique_ptr<arm_compute::IFunction> m_Layer;
-    bool m_Execute;
-
+    std::unique_ptr<arm_compute::NEConcatenateLayer> m_Layer;
 };
 
 } //namespace armnn