Add Dynamic Fusion Tests with Bug Fixes

- Allow fusing an arbitrary number of existing elementwise operators
- Fix issues with 3D and 4D tensors in Elementwise Addition and Floor components
    - Collapse the 3D/4D window in the same way as Conv2d does,
      i.e. collapse dim 1 and dim 2 together (see the sketch below this list)
- Fix Floor component issues when used after other components
- Add Dynamic Fusion Tests (Floor + Div, Conv2d + Add + Div)
- Add Elementwise Addition Broadcasting Test
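
A minimal sketch of the dim 1 / dim 2 collapse referenced above (illustrative
values only; TensorShape::collapse is the existing Compute Library API used
by this fix):

    TensorShape shape(8U, 4U, 2U, 3U); // NHWC tensor: [C=8, W=4, H=2, N=3]
    shape.collapse(2U, 1U);            // collapse 2 dimensions starting at dim 1
    // shape is now [8, 8, 3]: W and H merged, C and batch left unchanged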

Resolves: [COMPMID-5356]
Change-Id: I58b93a90175bb0440d43531d18cac94b5f5c2689
Signed-off-by: Mohammed Suhail Munshi <MohammedSuhail.Munshi@arm.com>
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/c/VisualCompute/ComputeLibrary/+/433956
Tested-by: bsgcomp <bsgcomp@arm.com>
Reviewed-by: Pablo Tello <pablo.tello@arm.com>
Comments-Addressed: bsgcomp <bsgcomp@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7957
Reviewed-by: SiCong Li <sicong.li@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h
index 57ac70a..04919ac 100644
--- a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h
@@ -371,6 +371,7 @@
     {
         return Window{};
     }
+
     /** Get the tag look-up table used to instantiate the component code.
      *
      * @param vtable
@@ -557,7 +558,7 @@
 
     std::string build_code()
     {
-        ARM_COMPUTE_ERROR_ON_MSG(_graph_root < 0, "No root found in the component graph");
+        ARM_COMPUTE_ERROR_ON_MSG(_graph_root == -1, "No root found in the component graph");
 
         // These data structures will hold the data from all the components in the blueprint
         std::set<std::string>    headers_list{};
@@ -666,9 +667,10 @@
         return _tile_info;
     }
 
+    // Get the global execution window, i.e. that of the root component
     Window get_execution_window() const
     {
-        ARM_COMPUTE_ERROR_ON_MSG(_graph_root < 0, "No root found in the component graph");
+        ARM_COMPUTE_ERROR_ON_MSG(_graph_root == -1, "No root found in the component graph");
         ARM_COMPUTE_ERROR_ON_MSG(_dst_id == -1, "Destination Tensor Id should be ready before calling get_execution_window()");
 
         return _components.find(_graph_root)->second->get_window();
@@ -925,4 +927,4 @@
 } // namespace experimental
 } // namespace arm_compute
 #endif //ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMMON_H
-#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */
\ No newline at end of file
+#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseKernelComponent.cpp b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseKernelComponent.cpp
index 24a9eee..7515aec 100644
--- a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseKernelComponent.cpp
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseKernelComponent.cpp
@@ -24,6 +24,7 @@
 #ifdef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION
 
 #include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseKernelComponent.h"
+#include "arm_compute/core/Error.h"
 #include "arm_compute/core/Validate.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/WindowHelpers.h"
@@ -57,9 +58,13 @@
 
     auto_init_if_empty(*dst_info, out_shape, 1, lhs_info->data_type());
 
+    TensorShape output_shape = dst_info->tensor_shape();
+    // Collapse Dim 1 (W) and Dim 2 (H) together, leave Dim 0 (C) and upper dimensions unchanged
+    // This is in line with the collapsing convention used by Conv2d
+    output_shape.collapse(2U, 1U);
     const unsigned int vector_size_byte_opencl           = 16;
     const unsigned int num_elems_processed_per_iteration = adjust_vec_size(vector_size_byte_opencl / dst_info->element_size(), dst_info->dimension(0));
-    Window             win                               = calculate_max_window(*dst_info, Steps(num_elems_processed_per_iteration));
+    Window             win                               = calculate_max_window(output_shape, Steps(num_elems_processed_per_iteration));
 
     return win;
 }
@@ -83,8 +88,12 @@
         TILE({{DATA_TYPE}}, M0, N0, lhs_tile);
         TILE({{DATA_TYPE}}, M0, N0, rhs_tile);
 
+        // Because the window is collapsed, mout maps to dimensions 1 (y) and 2 (z) of the input tensor, so bout maps to dimension 3 (w)
+        {{lhs}}_offset_first_element_in_bytes += bout * {{lhs}}_stride_w;
+        {{rhs}}_offset_first_element_in_bytes += bout * {{rhs}}_stride_w;
+
         T_LOAD({{DATA_TYPE}}, M0, N0, BUFFER, {{lhs}}, cout, mout, 1, {{lhs}}_stride_y, lhs_tile);
-        T_LOAD({{DATA_TYPE}}, M0, N0, BUFFER, {{rhs}}, cout, mout, 1, {{rhs}}_stride_y, rhs_tile);
+        T_LOAD({{DATA_TYPE}}, {{rhs_m0}}, {{rhs_n0}}, BUFFER, {{rhs}}, {{rhs_start_x}}, {{rhs_start_y}}, 1, {{rhs}}_stride_y, rhs_tile);
 
 #if defined(IS_BROADCAST)
         T_ELTWISE_BROADCAST_{{ELTWISE_OP}}_X({{DATA_TYPE}}, M0, N0, lhs_tile, rhs_tile, {{dst}});
@@ -107,7 +116,7 @@
     {
         TILE({{DATA_TYPE}}, M0, N0, addend_tile);
 
-        T_LOAD({{DATA_TYPE}}, M0, N0, BUFFER, {{addend}}, cout, mout, 1, {{addend}}_stride_y, addend_tile);
+        T_LOAD({{DATA_TYPE}}, {{rhs_m0}}, {{rhs_n0}}, BUFFER, {{addend}}, {{rhs_start_x}}, {{rhs_start_y}}, 1, {{addend}}_stride_y, addend_tile);
 
 #if defined(IS_BROADCAST)
         T_ELTWISE_BROADCAST_{{ELTWISE_OP}}_X({{DATA_TYPE}}, M0, N0, {{acc}}, addend_tile, {{acc}});
@@ -122,16 +131,18 @@
 
 CLBuildOptions ClElementwiseKernelComponent::generate_build_options() const
 {
-    const auto t_src_info = _blueprint->impl().get_kernel_argument_info(_rhs.arg_id);
+    const auto t_rhs_info = _blueprint->impl().get_kernel_argument_info(_rhs.arg_id);
     const auto t_dst_info = _blueprint->impl().get_kernel_argument_info(_blueprint->impl().get_dst_id());
 
-    CLBuildOptions build_opts{};
-    const auto     n0           = _blueprint->impl().get_execution_window().x().step();
-    const auto     m0           = _blueprint->impl().get_execution_window().y().step();
-    const bool     is_broadcast = t_src_info->tensor_shape() != t_dst_info->tensor_shape();
+    CLBuildOptions     build_opts{};
+    const auto         n0               = _blueprint->impl().get_execution_window().x().step();
+    const auto         m0               = _blueprint->impl().get_execution_window().y().step();
+    const unsigned int partial_store_n0 = t_dst_info->dimension(0) % n0;
+    const bool         is_broadcast     = t_rhs_info->tensor_shape() != t_dst_info->tensor_shape();
 
     build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
     build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
+    build_opts.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(partial_store_n0));
     build_opts.add_option_if(is_broadcast, "-DIS_BROADCAST");
 
     return build_opts;
@@ -166,6 +177,7 @@
 {
     TagLUT     lut{};
     const auto t_dst_info = _blueprint->impl().get_kernel_argument_info(_blueprint->impl().get_dst_id());
+    const auto t_rhs_info = _blueprint->impl().get_kernel_argument_info(_rhs.arg_id);
     // Arguments and global shared variables
     const bool is_root = _blueprint->impl().group(_lhs.arg_id) == SharedVarGroup::Argument && _blueprint->impl().group(_rhs.arg_id) == SharedVarGroup::Argument;
     if(is_root)
@@ -211,6 +223,39 @@
         default:
             ARM_COMPUTE_ERROR("Arithmetic Operation not supported");
     }
+
+    // Set broadcast parameters
+    // PRE: All tensors are broadcast-compatible
+    const bool is_broadcast = t_rhs_info->tensor_shape() != t_dst_info->tensor_shape();
+    if(is_broadcast)
+    {
+        // Note that n0 maps to input tensor dimension 0, m0 maps to input dimensions 1 and 2 because of our collapse strategy
+        if(t_rhs_info->dimension(0) == 1U && t_rhs_info->dimension(1) == 1U && t_rhs_info->dimension(2) == 1U) // Broadcast in X, Y, Z: collapsed rhs win [M0xN0] = [1x1]
+        {
+            lut["rhs_m0"]      = "1";
+            lut["rhs_n0"]      = "1";
+            lut["rhs_start_y"] = "0";
+            lut["rhs_start_x"] = "0";
+        }
+        else if(t_rhs_info->dimension(1) == 1U && t_rhs_info->dimension(2) == 1U) // Broadcast in Y and Z: collapsed rhs win [M0xN0] = [1xN]
+        {
+            lut["rhs_m0"]      = "1";
+            lut["rhs_n0"]      = "N0";
+            lut["rhs_start_y"] = "0";
+            lut["rhs_start_x"] = "cout";
+        }
+        else
+        {
+            ARM_COMPUTE_ERROR("Only support rhs broadcasting in all X, Y, Z dimensions, or just in Y and Z dimensions");
+        }
+    }
+    else
+    {
+        lut["rhs_m0"]      = "M0";
+        lut["rhs_n0"]      = "N0";
+        lut["rhs_start_y"] = "mout";
+        lut["rhs_start_x"] = "cout";
+    }
     return lut;
 }
 } // namespace dynamic_fusion
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseKernelComponent.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseKernelComponent.h
index 91b14ff..f837745 100644
--- a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseKernelComponent.h
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseKernelComponent.h
@@ -37,6 +37,20 @@
 class ClElementwiseKernelComponent : public IClKernelComponent
 {
 public:
+    /** Construct a new Cl Elementwise Kernel Component object
+     *
+     * @param[in]  blueprint Blueprint to which this component is added
+     * @param[in]  desc      Component descriptor
+     * @param[in]  lhs       Link to LHS tensor
+     * @param[in]  rhs       Link to RHS tensor
+     * @param[out] dst       Link to DST tensor
+     *
+     * Support Level
+     * Data Type:       F16, F32
+     * Tensor Shape:    Any shape with 1 to 4 dimensions
+     * Value Range:     All
+     * Broadcasting:    Only the RHS tensor can be broadcast to match the LHS. Broadcasting is supported either in dimensions 1 and 2, or in dimensions 0, 1 and 2
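+     *                  For example (illustrative shapes only): with an lhs of shape [8, 16, 16] ([C, W, H]),
+     *                  an rhs of shape [8, 1, 1] broadcasts in dimensions 1 and 2, while an rhs of
+     *                  shape [1, 1, 1] broadcasts in dimensions 0, 1 and 2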
+     */
     ClElementwiseKernelComponent(ClKernelBlueprint *blueprint, const ClElementwiseKernelDescriptor &desc, const Link &lhs, const Link &rhs, const Link &dst)
         : IClKernelComponent(blueprint), _desc{ desc }, _lhs{ lhs }, _rhs{ rhs }, _dst{ dst }
     {
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClFloorKernelComponent.cpp b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClFloorKernelComponent.cpp
index 87cc110..0a20a8f 100644
--- a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClFloorKernelComponent.cpp
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClFloorKernelComponent.cpp
@@ -21,9 +21,10 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#ifdef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION
 
+#ifdef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION
 #include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClFloorKernelComponent.h"
+#include "arm_compute/core/Error.h"
 #include "arm_compute/core/Validate.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/WindowHelpers.h"
@@ -38,12 +39,10 @@
 {
     return ComponentType::Simple;
 }
-
 std::set<std::string> ClFloorKernelComponent::get_headers_list() const
 {
     return std::set<std::string> { "common/experimental/gemm_fused_post_ops/fp_mixed_precision_helpers.h", "tile_helpers.h" };
 }
-
 Window ClFloorKernelComponent::get_window() const
 {
     const ITensorInfo *src_info = _blueprint->impl().get_kernel_argument_info(_src.arg_id);
@@ -52,16 +51,22 @@
     ARM_COMPUTE_ERROR_ON_NULLPTR(src_info, dst_info);
     auto_init_if_empty(*dst_info, src_info->tensor_shape(), 1, src_info->data_type());
 
+    TensorShape output_shape = dst_info->tensor_shape();
+    // Collapse Dim 1 (W) and Dim 2 (H) together, leave Dim 0 (C) and upper dimensions unchanged
+    // This is in line with the collapsing convention used by Conv2d
+    output_shape.collapse(2U, 1U);
     const unsigned int vector_size_byte_opencl           = 16;
     const unsigned int num_elems_processed_per_iteration = adjust_vec_size(vector_size_byte_opencl / dst_info->element_size(), dst_info->dimension(0));
-    Window             win                               = calculate_max_window(*dst_info, Steps(num_elems_processed_per_iteration));
+    Window             win                               = calculate_max_window(output_shape, Steps(num_elems_processed_per_iteration));
 
     return win;
 }
-
 std::string ClFloorKernelComponent::get_component_code() const
 {
-    return R"_(
+    bool is_root = _blueprint->impl().group(_src.arg_id) == SharedVarGroup::Argument;
+    if(is_root)
+    {
+        return R"_(
     //------------------ START KERNEL {{meta_kernel_id}} FLOOR ---------------------
     // IN_0(src)            {{src}}
     // OUT(dst, accum)      {{dst}}
@@ -69,30 +74,40 @@
     {
         TILE({{DATA_TYPE}}, M0, N0, src_tile);
 
+        // Because the window is collapsed, mout maps to dimensions 1 (y) and 2 (z) of the input tensor, so bout maps to dimension 3 (w)
+        {{src}}_offset_first_element_in_bytes += bout * {{src}}_stride_w;
         T_LOAD({{DATA_TYPE}}, M0, N0, BUFFER, {{src}}, cout, mout, 1, {{src}}_stride_y, src_tile);
+
         T_FLOOR({{DATA_TYPE}}, M0, N0, src_tile, {{dst}});
     }
-
     //------------------ END KERNEL {{meta_kernel_id}} FLOOR ---------------------
 )_";
+    }
+    else
+    {
+        return R"_(
+    //------------------ START KERNEL {{meta_kernel_id}} FLOOR ---------------------
+    // IN_0/Out(Accumulator)        {{acc}}
+    // output = floor(input)
+    {
+        T_FLOOR({{DATA_TYPE}}, M0, N0, {{acc}}, {{acc}});
+    }
+    //------------------ END KERNEL {{meta_kernel_id}} FLOOR ---------------------
+)_";
+    }
 }
-
 CLBuildOptions ClFloorKernelComponent::generate_build_options() const
 {
-    CLBuildOptions build_opts{};
-
-    const auto n0 = _blueprint->impl().get_execution_window().x().step();
-    const auto m0 = _blueprint->impl().get_execution_window().y().step();
-
+    CLBuildOptions     build_opts{};
+    const auto         n0               = _blueprint->impl().get_execution_window().x().step();
+    const auto         m0               = _blueprint->impl().get_execution_window().y().step();
     const auto         dst_info         = _blueprint->impl().get_kernel_argument_info(_blueprint->impl().get_dst_id());
     const unsigned int partial_store_n0 = dst_info->dimension(0) % n0;
     build_opts.add_option("-DM0=" + support::cpp11::to_string(m0));
     build_opts.add_option("-DN0=" + support::cpp11::to_string(n0));
     build_opts.add_option("-DPARTIAL_N0=" + support::cpp11::to_string(partial_store_n0));
-
     return build_opts;
 }
-
 std::string ClFloorKernelComponent::generate_config_id() const
 {
     auto        t_dst_info = _blueprint->impl().get_kernel_argument_info(_blueprint->impl().get_dst_id());
@@ -106,20 +121,28 @@
     config_id += lower_string(string_from_data_layout(t_dst_info->data_layout()));
     return config_id;
 }
-
 void ClFloorKernelComponent::allocate_shared_vars(SharedVarTable &vtable) const
 {
     vtable.add(_src, _blueprint->impl().group(_src.arg_id), ClKernelArgDescriptor(_src.arg_id, ClKernelTensorArgType::Tensor_4D_t_Buffer), "src");
     vtable.add(_dst, _blueprint->impl().group(_dst.arg_id), ClKernelArgDescriptor(_dst.arg_id, ClKernelTensorArgType::Tensor_4D_t_Buffer), "dst");
 }
-
 ClFloorKernelComponent::TagLUT ClFloorKernelComponent::get_tag_lut(const SharedVarTable &vtable) const
 {
     TagLUT     lut{};
     const auto t_dst_info = _blueprint->impl().get_kernel_argument_info(_blueprint->impl().get_dst_id());
     // Arguments and global shared variables
-    lut["src"]            = vtable.get(_src);
-    lut["dst"]            = vtable.get(_dst);
+    const bool is_root = _blueprint->impl().group(_src.arg_id) == SharedVarGroup::Argument;
+
+    if(is_root)
+    {
+        lut["src"] = vtable.get(_src);
+        lut["dst"] = vtable.get(_dst);
+    }
+    else
+    {
+        lut["acc"] = vtable.get(_src);
+    }
+
     lut["meta_kernel_id"] = id();
     lut["DATA_TYPE"]      = get_cl_type_from_data_type(t_dst_info->data_type());
     return lut;
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClFloorKernelComponent.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClFloorKernelComponent.h
index 5463e23..e791b36 100644
--- a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClFloorKernelComponent.h
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClFloorKernelComponent.h
@@ -37,6 +37,17 @@
 class ClFloorKernelComponent : public IClKernelComponent
 {
 public:
+    /** Construct a new Cl Floor Kernel Component object
+     *
+     * @param[in]  blueprint Blueprint to which this component is added
+     * @param[in]  src       Link to SRC tensor
+     * @param[out] dst       Link to DST tensor
+     *
+     * Support Level
+     * Data Type:       F16, F32
+     * Tensor Shape:    Any shape with 1 to 4 dimensions
+     * Value Range:     All
+     */
     ClFloorKernelComponent(ClKernelBlueprint *blueprint, const Link &src, const Link &dst)
         : IClKernelComponent(blueprint), _src{ src }, _dst{ dst }
     {
@@ -71,4 +82,4 @@
 } // namespace experimental
 } // namespace arm_compute
 #endif // ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLFLOORKERNELCOMPONENT_H
-#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */
\ No newline at end of file
+#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp
index 4ac27e0..7c805d5 100644
--- a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp
@@ -108,6 +108,10 @@
     return R"_(
     //------------------ START KERNEL {{meta_kernel_id}} STORE ---------------------
     {
+    // This also follows the NHWC layout:
+    // cout maps to global_id(0), i.e. Channel
+    // mout maps to global_id(1), i.e. Height and Width (collapsed window)
+    // bout maps to global_id(2), i.e. N / Batch
     #define _IDST_WIDTH {{dst}}_w
     #define _IDST_HEIGHT {{dst}}_h
         TILE(uint, M0, 1, dst_indirect_y);
diff --git a/tests/validation/CL/UNIT/dynamic_fusion/ArbitraryElementwiseFusion.cpp b/tests/validation/CL/UNIT/dynamic_fusion/ArbitraryElementwiseFusion.cpp
new file mode 100644
index 0000000..1b1e8aa
--- /dev/null
+++ b/tests/validation/CL/UNIT/dynamic_fusion/ArbitraryElementwiseFusion.cpp
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifdef ENABLE_EXPERIMENTAL_DYNAMIC_FUSION
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h"
+#include "src/core/utils/helpers/float_ops.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/framework/Macros.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/reference/ConvolutionLayer.h"
+#include "tests/validation/reference/ElementwiseOperations.h"
+#include "tests/validation/reference/Permute.h"
+
+#include "arm_compute/runtime/experimental/ClCompositeOperator.h"
+#include "tests/validation/reference/Floor.h"
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "tests/validation/CL/UNIT/dynamic_fusion/Utils.h"
+
+using namespace arm_compute::experimental::dynamic_fusion;
+using namespace arm_compute::test::validation::utils;
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+TEST_SUITE(CL)
+TEST_SUITE(UNIT)
+TEST_SUITE(DYNAMIC_FUSION)
+TEST_SUITE(ArbitraryFusion)
+
+TEST_CASE(ElementwiseBroadcasting, framework::DatasetMode::ALL)
+{
+    // Test elementwise broadcasting
+    const auto data_type   = DataType::F32;
+    const auto data_layout = DataLayout::NHWC;
+
+    const auto input_shape = TensorShape(7, 9, 5);
+    const auto rhs_shape   = TensorShape(7, 1, 1);
+    const auto dst_shape   = TensorShape(7, 9, 5);
+
+    // Tensor Info
+    auto input_info  = TensorInfo(input_shape, 1, data_type, data_layout);
+    auto addend_info = TensorInfo(rhs_shape, 1, data_type, data_layout);
+    auto dst_info    = TensorInfo();
+
+    ElementwiseDescriptor add_desc{ ArithmeticOperation::ADD };
+
+    CLScheduler::get().default_reinit();
+    const auto    cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+    OperatorGraph op_graph;
+
+    const auto op_input  = add_tensor(op_graph, input_info);
+    const auto op_addend = add_tensor(op_graph, addend_info);
+    const auto op_dst    = add_tensor(op_graph, dst_info);
+
+    add_op_elementwise_op(op_graph, add_desc, op_input, op_addend, op_dst);
+
+    const ClWorkloadContext workload_ctx{ GpuInfo{ CLScheduler::get().target() } };
+    ClWorkload              workload;
+    build(workload, op_graph, workload_ctx);
+
+    ClCompositeOperator op;
+    op.configure(cl_compile_ctx, workload);
+
+    // Construct tensors
+    CLTensor t_input{};
+    CLTensor t_addend{};
+    CLTensor t_dst{};
+
+    // Init tensors
+    t_input.allocator()->init(input_info);
+    t_addend.allocator()->init(addend_info);
+    t_dst.allocator()->init(dst_info);
+
+    // Allocate and fill tensors
+    t_input.allocator()->allocate();
+    t_addend.allocator()->allocate();
+    t_dst.allocator()->allocate();
+
+    // Fill
+    fill<float>(CLAccessor(t_input), 0, library.get());
+    fill<float>(CLAccessor(t_addend), 1, library.get());
+
+    // Pack tensors
+    OpTensorBinding bp_tensors({ { op_input, &t_input },
+        { op_addend, &t_addend },
+        { op_dst, &t_dst }
+    });
+
+    // Populate prepare and run pack-maps (including allocating aux tensors)
+    ClAuxTensorData aux_tensor_data{};
+    TensorPackMap   prepare_pack_map{};
+    TensorPackMap   run_pack_map{};
+    bind_tensors(aux_tensor_data, prepare_pack_map, run_pack_map, workload, bp_tensors);
+
+    op.prepare(prepare_pack_map);
+    op.run(run_pack_map);
+
+    // Create reference
+    SimpleTensor<float> ref_input{ input_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
+    SimpleTensor<float> ref_addend{ rhs_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
+
+    // Fill reference
+    fill<float>(ref_input, 0, library.get());
+    fill<float>(ref_addend, 1, library.get());
+
+    auto ref_input_nchw  = reference::permute(ref_input, PermutationVector(1U, 2U, 0U));
+    auto ref_addend_nchw = reference::permute(ref_addend, PermutationVector(1U, 2U, 0U));
+
+    auto dst_shape_nchw = dst_shape;
+    permute(dst_shape_nchw, PermutationVector(1U, 2U, 0U));
+
+    auto ref_t_dst_nchw = reference::arithmetic_operation(
+                              ArithmeticOperation::ADD,
+                              ref_input_nchw,
+                              ref_addend_nchw,
+                              data_type,
+                              ConvertPolicy{});
+
+    RelativeTolerance<float> tolerance_f32(0.001f);
+    validate(CLAccessor(t_dst), ref_t_dst_nchw, tolerance_f32);
+}
+TEST_CASE(DivFloor, framework::DatasetMode::ALL)
+{
+    // x = floor(div(input, input2))
+    const auto data_type    = DataType::F32;
+    const auto eltwise_info = ElementwiseDescriptor{ ArithmeticOperation::DIV };
+
+    // Tensor Values
+    const auto width  = 7U;
+    const auto height = 6U;
+
+    // Shapes
+    const auto input1_shape = TensorShape(width, height);
+    const auto input2_shape = TensorShape(width, height);
+    const auto dst_shape    = TensorShape(width, height);
+
+    // Create reference
+    SimpleTensor<float> ref_src_nhwc{ input1_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
+    SimpleTensor<float> ref_src2_nhwc{ input2_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
+
+    // Fill reference
+    fill<float>(ref_src_nhwc, 0, library.get());
+    fill<float>(ref_src2_nhwc, 1, library.get());
+
+    auto ref_src  = reference::permute(ref_src_nhwc, PermutationVector(1U, 2U, 0U));
+    auto ref_src2 = reference::permute(ref_src2_nhwc, PermutationVector(1U, 2U, 0U));
+
+    TensorShape dst_shape_nchw{ dst_shape };
+    permute(dst_shape_nchw, PermutationVector(1U, 2U, 0U));
+
+    const auto ref_dst_nchw = reference::floor_layer(reference::arithmetic_operation(
+                                                         ArithmeticOperation::DIV,
+                                                         ref_src,
+                                                         ref_src2,
+                                                         data_type,
+                                                         ConvertPolicy::SATURATE));
+
+    // Tensor Info
+    auto input1_info = TensorInfo(input1_shape, 1, data_type, DataLayout::NHWC);
+    auto input2_info = TensorInfo(input2_shape, 1, data_type, DataLayout::NHWC);
+    auto dst_info    = TensorInfo();
+    auto acc_info    = TensorInfo(); // Intermediate tensor for division
+
+    // Initialise Scheduler
+    CLScheduler::get().default_reinit();
+    const auto    cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+    OperatorGraph op_graph;
+
+    // Add tensors
+    auto op_input1 = add_tensor(op_graph, input1_info);
+    auto op_input2 = add_tensor(op_graph, input2_info);
+    auto op_acc    = add_tensor(op_graph, acc_info);
+    auto op_dst    = add_tensor(op_graph, dst_info);
+
+    add_op_elementwise_op(op_graph, eltwise_info, op_input1, op_input2, op_acc);
+    add_op_floor(op_graph, FloorDescriptor(), op_acc, op_dst);
+
+    const ClWorkloadContext workload_ctx{ GpuInfo{ CLScheduler::get().target() } };
+    ClWorkload              workload;
+    build(workload, op_graph, workload_ctx);
+
+    ClCompositeOperator op;
+    op.configure(cl_compile_ctx, workload);
+
+    // Construct tensors
+    CLTensor t_input1{};
+    CLTensor t_input2{};
+    CLTensor t_dst{};
+
+    // Init Tensors
+    t_input1.allocator()->init(input1_info);
+    t_input2.allocator()->init(input2_info);
+    t_dst.allocator()->init(dst_info);
+
+    // Allocate and fill tensors
+    t_input1.allocator()->allocate();
+    t_input2.allocator()->allocate();
+    t_dst.allocator()->allocate();
+
+    fill<float>(CLAccessor(t_input1), 0, library.get());
+    fill<float>(CLAccessor(t_input2), 1, library.get());
+
+    // "Pack" tensors
+    OpTensorBinding bp_tensors({ { op_input1, &t_input1 },
+        { op_input2, &t_input2 },
+        { op_dst, &t_dst }
+    });
+
+    // Populate prepare and run pack-maps (including allocating aux tensors)
+    ClAuxTensorData aux_tensor_data{};
+    TensorPackMap   prepare_pack_map{};
+    TensorPackMap   run_pack_map{};
+    bind_tensors(aux_tensor_data, prepare_pack_map, run_pack_map, workload, bp_tensors);
+
+    op.prepare(prepare_pack_map);
+    op.run(run_pack_map);
+
+    RelativeTolerance<float> tolerance_f32(0.001f); /**< Tolerance value for comparing reference's output against implementation's output for floating point data types */
+    validate(CLAccessor(t_dst), ref_dst_nchw, tolerance_f32);
+}
+TEST_CASE(Dconv2dAddDiv, framework::DatasetMode::ALL)
+{
+    // output = div(add(conv2d1x1(direct_conv)(input, weights, bias), addend), divend)
+    const auto data_type   = DataType::F32;
+    const auto data_layout = DataLayout::NHWC;
+
+    const auto input_shape  = TensorShape(384, 12, 12);
+    const auto weight_shape = TensorShape(384, 1, 1, 16);
+    const auto dst_shape    = TensorShape(16, 12, 12);
+
+    // Tensor Info
+    auto input_info  = TensorInfo(input_shape, 1, data_type, data_layout);
+    auto weight_info = TensorInfo(weight_shape, 1, data_type, data_layout);
+    auto addend_info = TensorInfo(dst_shape, 1, data_type, data_layout);
+    auto divend_info = TensorInfo(dst_shape, 1, data_type, data_layout);
+    auto acc_info    = TensorInfo(); // Intermediate tensor for conv
+    auto acc_1_info  = TensorInfo();
+    auto dst_info    = TensorInfo();
+
+    Conv2dDescriptor      conv2d_desc{};
+    ElementwiseDescriptor add_desc{ ArithmeticOperation::ADD };
+    ElementwiseDescriptor div_desc{ ArithmeticOperation::DIV };
+
+    CLScheduler::get().default_reinit();
+    const auto    cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
+    OperatorGraph op_graph;
+
+    const auto op_input  = add_tensor(op_graph, input_info);
+    const auto op_weight = add_tensor(op_graph, weight_info);
+    const auto op_addend = add_tensor(op_graph, addend_info);
+    const auto op_divend = add_tensor(op_graph, divend_info);
+    const auto op_acc    = add_tensor(op_graph, acc_info);   // temp accumulator; TensorInfo to be inferred
+    const auto op_acc_1  = add_tensor(op_graph, acc_1_info); // temp accumulator; TensorInfo to be inferred
+    const auto op_dst    = add_tensor(op_graph, dst_info);
+
+    auto conv2d = add_op_conv2d(op_graph, conv2d_desc, op_input, op_weight, op_acc);
+    force_conv2d_method(op_graph, conv2d, ConvolutionMethod::DIRECT);
+    add_op_elementwise_op(op_graph, add_desc, op_acc, op_addend, op_acc_1);
+    add_op_elementwise_op(op_graph, div_desc, op_acc_1, op_divend, op_dst);
+
+    const ClWorkloadContext workload_ctx{ GpuInfo{ CLScheduler::get().target() } };
+    ClWorkload              workload;
+    build(workload, op_graph, workload_ctx);
+
+    ClCompositeOperator op;
+    op.configure(cl_compile_ctx, workload);
+
+    // Construct tensors
+    CLTensor t_input{};
+    CLTensor t_weight{};
+    CLTensor t_addend{};
+    CLTensor t_divend{};
+    CLTensor t_dst{};
+
+    // Init tensors
+    t_input.allocator()->init(input_info);
+    t_weight.allocator()->init(weight_info);
+    t_divend.allocator()->init(divend_info);
+    t_addend.allocator()->init(addend_info);
+    t_dst.allocator()->init(dst_info);
+
+    // Allocate and fill tensors
+    t_input.allocator()->allocate();
+    t_weight.allocator()->allocate();
+    t_divend.allocator()->allocate();
+    t_addend.allocator()->allocate();
+    t_dst.allocator()->allocate();
+
+    // Fill
+    fill<float>(CLAccessor(t_input), 0, library.get());
+    fill<float>(CLAccessor(t_weight), 1, library.get());
+    fill<float>(CLAccessor(t_addend), 2, library.get());
+    fill<float>(CLAccessor(t_divend), 3, library.get());
+
+    // Pack tensors
+    OpTensorBinding bp_tensors({ { op_input, &t_input },
+        { op_weight, &t_weight },
+        { op_addend, &t_addend },
+        { op_divend, &t_divend },
+        { op_dst, &t_dst }
+    });
+
+    // Populate prepare and run pack-maps (including allocating aux tensors)
+    ClAuxTensorData aux_tensor_data{};
+    TensorPackMap   prepare_pack_map{};
+    TensorPackMap   run_pack_map{};
+    bind_tensors(aux_tensor_data, prepare_pack_map, run_pack_map, workload, bp_tensors);
+
+    op.prepare(prepare_pack_map);
+    op.run(run_pack_map);
+
+    // Create reference
+    SimpleTensor<float> ref_input{ input_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
+    SimpleTensor<float> ref_weight{ weight_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
+    SimpleTensor<float> ref_bias_placeholder{ dst_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
+    SimpleTensor<float> ref_addend{ dst_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
+    SimpleTensor<float> ref_divend{ dst_shape, data_type, 1, QuantizationInfo(), DataLayout::NHWC };
+
+    // Fill reference
+    fill<float>(ref_input, 0, library.get());
+    fill<float>(ref_weight, 1, library.get());
+    fill<float>(ref_addend, 2, library.get());
+    fill<float>(ref_divend, 3, library.get());
+
+    auto ref_input_nchw            = reference::permute(ref_input, PermutationVector(1U, 2U, 0U));
+    auto ref_weight_nchw           = reference::permute(ref_weight, PermutationVector(1U, 2U, 0U));
+    auto ref_bias_placeholder_nchw = reference::permute(ref_bias_placeholder, PermutationVector(1U, 2U, 0U));
+    auto ref_addend_nchw           = reference::permute(ref_addend, PermutationVector(1U, 2U, 0U));
+    auto ref_divend_nchw           = reference::permute(ref_divend, PermutationVector(1U, 2U, 0U));
+
+    auto dst_shape_nchw = dst_shape;
+    permute(dst_shape_nchw, PermutationVector(1U, 2U, 0U));
+
+    PadStrideInfo legacy_pad_stride(conv2d_desc.stride.x(), conv2d_desc.stride.y(), conv2d_desc.pad.left, conv2d_desc.pad.right, conv2d_desc.pad.top, conv2d_desc.pad.bottom, DimensionRoundingType{});
+    auto          ref_acc_nchw = reference::arithmetic_operation(
+                                     ArithmeticOperation::ADD,
+                                     ref_addend_nchw,
+                                     reference::convolution_layer(ref_input_nchw, ref_weight_nchw, ref_bias_placeholder_nchw, dst_shape_nchw, legacy_pad_stride, conv2d_desc.dilation),
+                                     data_type,
+                                     ConvertPolicy{});
+
+    auto ref_t_dst_nchw = reference::arithmetic_operation(
+                              ArithmeticOperation::DIV,
+                              ref_acc_nchw,
+                              ref_divend_nchw,
+                              data_type,
+                              ConvertPolicy{});
+
+    RelativeTolerance<float> tolerance_f32(0.001f);
+    validate(CLAccessor(t_dst), ref_t_dst_nchw, tolerance_f32);
+}
+
+TEST_SUITE_END() // ArbitraryFusion
+TEST_SUITE_END() // DYNAMIC_FUSION
+TEST_SUITE_END() // UNIT
+TEST_SUITE_END() // CL
+
+} // namespace validation
+} // namespace test
+} // namespace arm_compute
+
+#endif /* ENABLE_EXPERIMENTAL_DYNAMIC_FUSION */