IVGCVSW-2957 MergerLayer subtensor optimization now backend agnostic

+ Update clframework pin
+ Cl and Neon Merger workloads updated to use the MemoryLayout-agnostic API
+ Workloads only use the sub-tensor optimization if ALL input tensors are sub-tensors (see the sketch after this list)
+ Refactor LayerSupportCommon code to be more succinct
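
The gating condition in the second point is an all-of test over the workload's input handles: a handle is a sub-tensor exactly when it has a parent. A minimal sketch of that check, assuming a hypothetical TensorHandle type standing in for ArmNN's ITensorHandle:

    #include <algorithm>
    #include <vector>

    // Hypothetical stand-in for ArmNN's ITensorHandle: GetParent() is non-null
    // only when this handle is a sub-tensor view of another tensor.
    struct TensorHandle
    {
        TensorHandle* m_Parent = nullptr;
        TensorHandle* GetParent() const { return m_Parent; }
    };

    // The optimization is only safe when every input already aliases the
    // output buffer, i.e. every input handle is a sub-tensor.
    bool UseSubTensorOptimization(const std::vector<TensorHandle*>& inputs)
    {
        return !inputs.empty() &&
               std::all_of(inputs.begin(), inputs.end(),
                           [](const TensorHandle* h) { return h->GetParent() != nullptr; });
    }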

Change-Id: Ib61ad4ccbd767e924dff07e61022e0cda4069828
Signed-off-by: Derek Lamberti <derek.lamberti@arm.com>
diff --git a/src/backends/cl/ClWorkloadFactory.cpp b/src/backends/cl/ClWorkloadFactory.cpp
index d41a7e5..e4097a1 100644
--- a/src/backends/cl/ClWorkloadFactory.cpp
+++ b/src/backends/cl/ClWorkloadFactory.cpp
@@ -113,6 +113,14 @@
         coords.set(i, boost::numeric_cast<int>(subTensorOrigin[revertedIndex]));
     }
 
+    // Validate the requested sub-tensor region against the parent shape; if it
+    // is invalid, return nullptr so the caller falls back to a standalone tensor.
+    const arm_compute::TensorShape parentShape = armcomputetensorutils::BuildArmComputeTensorShape(parent.GetShape());
+    if (!::arm_compute::error_on_invalid_subtensor(__func__, __FILE__, __LINE__, parentShape, coords, shape))
+    {
+        return nullptr;
+    }
+
     return std::make_unique<ClSubTensorHandle>(
         boost::polymorphic_downcast<IClTensorHandle*>(&parent), shape, coords);
 }
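
Returning nullptr from CreateSubTensorHandle signals that the requested region cannot be represented as a sub-tensor, and the caller is expected to fall back to a standalone tensor. A hedged caller-side sketch (the factory method names follow ArmNN's IWorkloadFactory, but the surrounding variables are illustrative, not taken from this diff):

    // Prefer the zero-copy sub-tensor view; if the factory rejects the
    // coordinates and returns nullptr, allocate an ordinary tensor instead.
    std::unique_ptr<armnn::ITensorHandle> handle =
        factory.CreateSubTensorHandle(parentHandle, subShape, subOrigin);
    if (!handle)
    {
        handle = factory.CreateTensorHandle(tensorInfo);
    }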