Merge kernel prototype patch

Resolves: COMPMID-5151

Signed-off-by: Giorgio Arena <giorgio.arena@arm.com>
Change-Id: Ic4024d5cd4819fe917a1d49621f1866ae2e90a37
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7260
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.cpp b/src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.cpp
new file mode 100644
index 0000000..e40f9c6
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h"
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h"
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClKernelComponents.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+ClKernelBlueprint::ClKernelBlueprint()
+    : _impl{ std::make_unique<ClKernelBlueprint::Implementation>() }
+{
+}
+
+ClKernelBlueprint::~ClKernelBlueprint() = default;
+
+ClKernelBlueprint::Implementation &ClKernelBlueprint::impl()
+{
+    return *_impl;
+}
+const ClKernelBlueprint::Implementation &ClKernelBlueprint::impl() const
+{
+    return *_impl;
+}
+
+Status add_tensor_argument(ClKernelBlueprint &kernel_blueprint, const ClTensorDescriptor &tensor_desc, ArgumentID &id)
+{
+    id = kernel_blueprint.impl().add_kernel_argument(tensor_desc);
+    return Status{};
+}
+
+Status add_tensor_intermed(ClKernelBlueprint &kernel_blueprint, ArgumentID &id)
+{
+    id = kernel_blueprint.impl().add_intermediate_tensor();
+    return Status{};
+}
+
+Status add_kcomp_gemm_native(ClKernelBlueprint &kernel_blueprint, const ClKernelComponentDescriptor &, const GemmNativeDescriptor &,
+                             ArgumentID lhs_id, ArgumentID rhs_id, ArgumentID bias_id, ArgumentID &dst_id)
+{
+    kernel_blueprint.impl().validate_arg_ids({ lhs_id, rhs_id, bias_id, dst_id });
+
+    kernel_blueprint.impl().add_component(
+        std::make_unique<ClGemmNativeKernelComponent>(
+            SharedVarLink{ lhs_id, SharedVarIO::Input, kernel_blueprint.impl().group(lhs_id) },
+            SharedVarLink{ rhs_id, SharedVarIO::Input, kernel_blueprint.impl().group(rhs_id) },
+            SharedVarLink{ dst_id, SharedVarIO::Output, kernel_blueprint.impl().group(dst_id) },
+            SharedVarLink{ bias_id, SharedVarIO::Input, kernel_blueprint.impl().group(bias_id) }));
+
+    return Status{};
+}
+
+Status add_kcomp_eltwise_add(ClKernelBlueprint &kernel_blueprint, const ClKernelComponentDescriptor &, const EltwiseAddDescriptor &,
+                             ArgumentID src0_id, ArgumentID src1_id, ArgumentID &dst_id)
+{
+    kernel_blueprint.impl().add_component(
+        std::make_unique<ClElementwiseAddKernelComponent>(
+            SharedVarLink{ src0_id, SharedVarIO::Input, kernel_blueprint.impl().group(src0_id) },
+            SharedVarLink{ src1_id, SharedVarIO::Input, kernel_blueprint.impl().group(src1_id) },
+            SharedVarLink{ dst_id, SharedVarIO::Output, kernel_blueprint.impl().group(dst_id) }));
+
+    return Status{};
+}
+Status add_kcomp_activation(ClKernelBlueprint &, const ClKernelComponentDescriptor &, const ActivationDescriptor &, ArgumentID, ArgumentID &)
+{
+    return Status{};
+}
+Status add_kcomp_store(ClKernelBlueprint &kernel_blueprint, const ClKernelComponentDescriptor &, ArgumentID src_tile, ArgumentID dst_tile, const StoreType &store_type)
+{
+    switch(store_type)
+    {
+        case StoreType::StoreBlockBoundaryAware:
+            kernel_blueprint.impl().add_component(
+                std::make_unique<ClStoreBlockBoundaryAwareKernelComponent>(
+                    SharedVarLink{ src_tile, SharedVarIO::Input, kernel_blueprint.impl().group(src_tile) },
+                    SharedVarLink{ dst_tile, SharedVarIO::Output, kernel_blueprint.impl().group(dst_tile) }));
+            break;
+        default:
+            ARM_COMPUTE_ERROR("Store mode not yet supported.");
+    }
+
+    return Status{};
+}
+
+Status set_tile_info(ClKernelBlueprint &bp, const TileDescriptor &tile_info)
+{
+    bp.impl().set_tile_info(tile_info);
+    return Status{};
+}
+Status build(ClKernelCode &code, const ClCodeBuilderContext &, ClKernelBlueprint &kernel_blueprint)
+{
+    code.name = kernel_blueprint.impl().build_kernel_name();
+    code.code = kernel_blueprint.impl().build_code();
+
+    code.config_id     = kernel_blueprint.impl().build_config_id();
+    code.build_options = kernel_blueprint.impl().build_options();
+    code.window        = kernel_blueprint.impl().get_execution_window();
+    code.arguments     = kernel_blueprint.impl().get_arguments();
+
+    return Status{};
+}
+Status tune_static(ClExecutionDescriptor &, const ClKernelCode &)
+{
+    return Status{};
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h
new file mode 100644
index 0000000..15622c8
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_CLKERNELBUILDINGAPI_H
+#define ARM_COMPUTE_EXPERIMENTAL_CLKERNELBUILDINGAPI_H
+
+#include "arm_compute/core/CL/CLCompileContext.h"
+#include "arm_compute/core/Window.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+using ArgumentID = int32_t;
+
+static constexpr ArgumentID g_arg_placeholder = -1;
+
+/** Verbose and explicit way to enumerate all the tensor arguments variants used by
+ *  all kernel implementations. This avoids any ambiguity in what kernel arguments are passed
+ */
+enum class TensorArgType : int
+{
+    Scalar,
+
+    Vector,
+
+    Image,
+    Image_Reinterpret_As_3D,
+    Image_Export_To_ClImage2D,
+
+    Image_3D, // 3D Tensor represented as a 2D Image + stride_z
+    Image_3D_Export_To_ClImage2D,
+
+    Tensor_3D,
+    Tensor_4D
+};
+/** Describes all the info required to add a kernel argument at run time */
+struct ClKernelArgRuntimeDescriptor
+{
+    ClKernelArgRuntimeDescriptor(int arg_id, TensorArgType type, bool slide_along_dimz = true)
+        : arg_id{ arg_id }, tensor_arg_type{ type }, slide_along_dimz{ slide_along_dimz }
+    {
+    }
+    ~ClKernelArgRuntimeDescriptor() = default;
+    int           arg_id{ g_arg_placeholder }; // Arg ID in the blueprint
+    TensorArgType tensor_arg_type{ TensorArgType::Image };
+    bool          slide_along_dimz{ true };
+};
+
+using ClKernelArgList = std::vector<ClKernelArgRuntimeDescriptor>;
+
+/** Intermediate representation of the final, complete kernel source. */
+class ClKernelBlueprint
+{
+public:
+    ClKernelBlueprint();
+    ~ClKernelBlueprint();
+
+private:
+    struct Implementation;
+    std::unique_ptr<Implementation> _impl;
+
+public:
+    Implementation       &impl();
+    const Implementation &impl() const;
+};
+
+///// Kernel Components /////
+
+/** Meta information about all Cl Kernel Components */
+struct ClKernelComponentDescriptor
+{
+    int32_t version{ 1 }; /**< Operator version */
+};
+
+/** Component: Tensor Argument */
+struct ClTensorDescriptor
+{
+    ClTensorDescriptor(const ITensorInfo *info, unsigned int dim)
+        : tensor_info(info), slice_dim(dim)
+    {
+    }
+
+    const ITensorInfo *tensor_info;
+    unsigned int       slice_dim;
+};
+
+Status add_tensor_argument(ClKernelBlueprint &, const ClTensorDescriptor &, ArgumentID &);
+Status add_tensor_intermed(ClKernelBlueprint &, ArgumentID &);
+
+/** Component: Gemm Native */
+struct GemmNativeDescriptor
+{
+    float             alpha{};
+    float             beta{};
+    unsigned int      m{};
+    unsigned int      n{};
+    unsigned int      k{};
+    unsigned int      depth_output_gemm3d{};
+    bool              reinterpret_input_as_3d{};
+    bool              broadcast_bias{};
+    bool              fp_mixed_precision{};
+    bool              has_pad_y{};
+    int               nmult_transpose1xW_width{};
+    int               mult_interleave4x4_height{};
+    GEMMLHSMatrixInfo lhs_info{};
+    GEMMRHSMatrixInfo rhs_info{};
+    int32_t           a_offset{};
+    int32_t           b_offset{};
+};
+
+Status add_kcomp_gemm_native(ClKernelBlueprint &, const ClKernelComponentDescriptor &, const GemmNativeDescriptor &, ArgumentID input_id,
+                             ArgumentID weights_id, ArgumentID bias_id, ArgumentID &dst_id);
+
+/** Component: Eltwise Add */
+struct EltwiseAddDescriptor
+{
+    ConvertPolicy convert_policy{ ConvertPolicy::SATURATE };
+};
+Status add_kcomp_eltwise_add(ClKernelBlueprint &, const ClKernelComponentDescriptor &, const EltwiseAddDescriptor &, ArgumentID src0_id,
+                             ArgumentID src1_id, ArgumentID &dst_id);
+
+/** Component: Activation */
+struct ActivationDescriptor
+{
+};
+Status add_kcomp_activation(ClKernelBlueprint &, const ClKernelComponentDescriptor &, const ActivationDescriptor &, ArgumentID src_id, ArgumentID &dst_id);
+
+enum class ClippingStrategy
+{
+    TOP_LEFT,
+    TOP_RIGHT,
+    BOTTOM_LEFT,
+    BOTTOM_RIGHT,
+};
+
+/** Component: Store */
+struct TileDescriptor
+{
+    Size2D           tile_dims{};
+    Size2D           boundaries{};
+    ClippingStrategy clipping{ ClippingStrategy::TOP_LEFT };
+
+    TileDescriptor()
+    {
+    }
+
+    TileDescriptor(Size2D dims, const Size2D &bound, const ClippingStrategy &clip)
+        : tile_dims(dims), boundaries(bound), clipping(clip)
+    {
+    }
+
+    bool empty() const
+    {
+        return (tile_dims.area() == 0) || (boundaries.area() == 0);
+    }
+};
+
+enum class StoreType
+{
+    VStore,
+    VStorePartial,
+    StoreRow,
+    ConvertStoreRow,
+    StoreBlock,
+    ConvertStoreBlock,
+    StoreRowPartial,
+    StoreBlockPartial,
+    StoreBlockBoundaryAware,
+    StoreVectorSelect,
+    TStoreIndirectWidthSelect
+};
+
+Status add_kcomp_store(ClKernelBlueprint &, const ClKernelComponentDescriptor &, ArgumentID src_id, ArgumentID dst_id, const StoreType &store_type);
+
+///// Kernel Components /////
+
+///// Building /////
+
+/** Information required for kernel compilation. The build results of KernelBlueprint */
+struct ClKernelCode
+{
+    std::string     name{};          /**< Kernel name */
+    std::string     code{};          /**< Kernel source code */
+    std::string     config_id{};     /**< Generated from blueprint based on complex component */
+    CLBuildOptions  build_options{}; /**< Kernel build options */
+    Window          window{};        /**< Execution window */
+    ClKernelArgList arguments{};     /**< Kernel argument specifications */
+
+    bool operator==(const ClKernelCode &other) const
+    {
+        return name == other.name && code == other.code && build_options == other.build_options;
+    }
+};
+
+/** GPU information for building the @ref ClKernelCode */
+struct GpuInfo
+{
+    GPUTarget target{ GPUTarget::UNKNOWN };
+};
+
+/** All information required for building the @ref ClKernelCode */
+struct ClCodeBuilderContext
+{
+    GpuInfo gpu_info{};
+};
+
+Status set_tile_info(ClKernelBlueprint &, const TileDescriptor &);
+
+/** Build final kernel source from KernelBlueprint */
+Status build(ClKernelCode &code, const ClCodeBuilderContext &, ClKernelBlueprint &);
+
+///// Building /////
+
+///// Tuning /////
+struct ClExecutionDescriptor
+{
+    cl::NDRange suggested_lws{}; /**< Suggested local work-group size for optimal performance if not zero */
+    cl::NDRange gws{};           /**< Global work-group to be used */
+};
+
+Status tune_static(ClExecutionDescriptor &, const ClKernelCode &);
+
+///// Tuning /////
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif //ARM_COMPUTE_EXPERIMENTAL_CLKERNELBUILDINGAPI_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h
new file mode 100644
index 0000000..3b5160a
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMMON_H
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMMON_H
+
+#include "arm_compute/core/CL/CLCompileContext.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/GPUTarget.h"
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h"
+
+#include <queue>
+#include <stack>
+#include <string>
+#include <unordered_set>
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** We introduce the concept of *Shared Variables* in the context of kernel building.
+ *  They are variables that can be accessed / shared among all the kernel components within a single kernel.
+ *  For now we consider 2 groups of shared variables:
+ *      Argument: The argument variables (parameters) of a kernel
+ *      Automatic: The automatic variables declared inside a kernel
+ *  All Shared Variables have the same kernel scope, and are thus visible to all kernel components
+*/
+
+enum class SharedVarIO
+{
+    Input,
+    Output
+};
+
+enum class SharedVarGroup
+{
+    Argument, // Parameters to a kernel function
+    Automatic // Automatic variables declared within the kernel body
+};
+
+/** Specifies a shared variable link for a component.
+ * It describes all the information that's available when a component is constructed / added:
+ *  e.g. its linkage (via ArgumentID and io) and its group
+ * This is not a shared variable on its own, but is used for instantiating a SharedVar when building the code
+ */
+struct SharedVarLink
+{
+    ArgumentID     arg_id{ g_arg_placeholder };
+    SharedVarIO    io{ SharedVarIO::Input };
+    SharedVarGroup group{ SharedVarGroup::Argument };
+    bool           is_empty() const
+    {
+        return arg_id == g_arg_placeholder;
+    }
+};
+
+/** A table of all the variables used in the kernel / blueprint
+ * NOTE: the order they appear in the table is the order of their "declaration" in the component code, and is also their ID
+ * NOTE: the variables all have the scope of the full kernel function
+ */
+class SharedVarTable
+{
+public:
+    struct SharedVar
+    {
+        SharedVarGroup               group;
+        std::string                  uniq_name; // Unique name, also the final variable name used in the built code
+        ClKernelArgRuntimeDescriptor desc;      // Automatic variables can and should still be described using this struct
+    };
+
+    using Arguments = std::vector<SharedVar>;
+
+    /** @note: The order of insertion is important. There is one precondition:
+     *        PRECOND: The components have been sorted topologically / is being traversed in topological order
+     *                 This ensures that all the consumer var links (Output, Automatic Links) can consume (return) the producer var links when they're referred
+     */
+    SharedVar add(SharedVarLink var_link, ClKernelArgRuntimeDescriptor runtime_desc, const std::string &name = "unnamed")
+    {
+        ARM_COMPUTE_ERROR_ON_MSG(var_link.is_empty(), "Non-empty SharedVarLink expected");
+        auto              var_id = _num_var;
+        std::stringstream ss;
+        ss << name << "_" << var_id;
+        const auto uniq_name = ss.str();
+        SharedVar  var{ var_link.group, uniq_name, runtime_desc };
+
+        if(var_link.group == SharedVarGroup::Argument)
+        {
+            _arguments.emplace(var_id, var);
+            _num_var++;
+            _var_id_lut[var_link.arg_id] = var_id;
+        }
+        else if(var_link.group == SharedVarGroup::Automatic)
+        {
+            if(var_link.io == SharedVarIO::Output)
+            {
+                _global_vars.emplace(var_id, var);
+                _num_var++;
+                _var_id_lut[var_link.arg_id] = var_id;
+            }
+            else
+            {
+                // For the input link, the var (and thus its arg_id) will always have been added by the time we get here if we traverse components in topological order
+                var = get_var(var_link.arg_id);
+            }
+        }
+        else
+        {
+            ARM_COMPUTE_ERROR("Unrecognised SharedVarGroup");
+        }
+        return var;
+    }
+
+    SharedVar get_var(ArgumentID arg_id) const
+    {
+        const auto var_id = _var_id_lut.at(arg_id); // arg_id has to exist in lut to begin with
+        auto       it     = _global_vars.find(var_id);
+        if(it != _global_vars.end())
+        {
+            return it->second;
+        }
+        it = _arguments.find(var_id);
+        if(it != _arguments.end())
+        {
+            return it->second;
+        }
+        ARM_COMPUTE_ERROR("Cannot find component variable");
+    }
+
+    /** @note The arguments are returned in the order they are added
+     */
+    Arguments get_kernel_arguments() const
+    {
+        Arguments args{};
+        for(const auto &a : _arguments)
+        {
+            args.push_back(a.second);
+        }
+        return args;
+    }
+
+private:
+    using VarID = int32_t;
+
+private:
+    std::map<VarID, SharedVar>            _global_vars{};
+    std::map<VarID, SharedVar>            _arguments{};
+    std::unordered_map<ArgumentID, VarID> _var_id_lut{};
+    VarID _num_var{ 0 };
+};
+
+enum class ComponentType
+{
+    Simple,
+    Complex,
+    Store
+};
+
+using ComponentID   = int32_t;
+using ComponentList = std::vector<ComponentID>;
+class IClKernelComponent
+{
+public:
+    using Link = SharedVarLink;
+    using Tag  = std::string;
+    struct TagVal
+    {
+        TagVal() = default;
+        TagVal(SharedVarTable::SharedVar var)
+            : value{ var.uniq_name }
+        {
+        }
+
+        TagVal(ComponentID id)
+            : value{ std::to_string(id) }
+        {
+        }
+
+        std::string value{};
+    };
+    using TagLUT = std::unordered_map<Tag, TagVal>; // Used to instantiate a code template / replace tags
+public:
+    virtual ~IClKernelComponent()                        = default;
+    virtual ComponentType     get_component_type() const = 0;
+    virtual std::vector<Link> get_links() const          = 0;
+    virtual std::string       name() const               = 0;
+
+    static std::string replace_tags(const std::string &code_template, const TagLUT &tags)
+    {
+        std::string                     replaced_code = "";
+        std::unordered_set<std::string> used_tags{};
+        bool                            scanning_pattern = false;
+        std::string                     pattern_found    = "";
+        for(size_t i = 0; i < code_template.size(); ++i) // was size() - 1: size_t underflow on empty templates, and the final character was always dropped
+        {
+            if(!scanning_pattern)
+            {
+                if(code_template[i] == '{' && i + 1 < code_template.size() && code_template[i + 1] == '{')
+                {
+                    i += 1;
+                    scanning_pattern = true;
+                    pattern_found    = "";
+                }
+                else
+                {
+                    replaced_code += code_template[i];
+                }
+            }
+            else
+            {
+                if(code_template[i] == '}' && i + 1 < code_template.size() && code_template[i + 1] == '}')
+                {
+                    i += 1;
+                    scanning_pattern = false;
+                    std::string err  = "Pattern " + pattern_found + " not found in tags";
+                    ARM_COMPUTE_ERROR_ON_MSG(tags.find(pattern_found) == tags.end(), err.c_str());
+                    replaced_code += tags.find(pattern_found)->second.value;
+                    used_tags.insert(pattern_found);
+                }
+                else
+                {
+                    pattern_found += code_template[i];
+                }
+            }
+        }
+        // Check for unused tags
+        for(const auto &tag : tags)
+        {
+            ARM_COMPUTE_UNUSED(tag);
+            ARM_COMPUTE_ERROR_ON_MSG(used_tags.find(tag.first) == used_tags.end(), "Warning: unused tags");
+        }
+        return replaced_code;
+    }
+    ComponentID id() const
+    {
+        return _id;
+    }
+    void set_id(ComponentID id)
+    {
+        _id = id;
+    }
+
+    virtual std::set<std::string> get_headers_list() const
+    {
+        return std::set<std::string> {};
+    }
+
+    virtual std::string get_additional_macros() const
+    {
+        return "";
+    }
+
+    virtual std::string get_component_code() const
+    {
+        return "";
+    }
+    /** "Allocate" all shared variables used in a component to the @p vtable, and generate a TagLUT used to instantiate the component code
+     *
+     * @param vtable
+     * @return TagLUT
+     */
+    virtual TagLUT allocate_vars(SharedVarTable &vtable) const = 0;
+
+    virtual std::string get_dst_addr_calculation() const
+    {
+        return "";
+    }
+
+private:
+    ComponentID _id{};
+};
+
+using ComponentUniquePtr = std::unique_ptr<IClKernelComponent>;
+
+/** Intermediate representation of the final, complete kernel source.
+ */
+struct ClKernelBlueprint::Implementation
+{
+public:
+    Implementation()  = default;
+    ~Implementation() = default;
+
+public:
+    ArgumentID add_kernel_argument(const ClTensorDescriptor &tensor_desc)
+    {
+        _kernel_arguments.insert(std::make_pair(_num_args, tensor_desc));
+        _shared_var_group_lut[_num_args] = SharedVarGroup::Argument;
+        return _num_args++;
+    }
+
+    ArgumentID add_intermediate_tensor()
+    {
+        _intermediate_tensors.insert(_num_args);
+        _shared_var_group_lut[_num_args] = SharedVarGroup::Automatic;
+        return _num_args++;
+    }
+
+    void set_tile_info(const TileDescriptor &tile_info)
+    {
+        _tile_info = tile_info;
+    }
+
+    SharedVarGroup group(ArgumentID arg_id) const
+    {
+        if(arg_id == g_arg_placeholder)
+        {
+            // In case of placeholder, don't care what we return;
+            return SharedVarGroup::Argument;
+        }
+        return _shared_var_group_lut.at(arg_id);
+    }
+
+    void validate_arg_ids(std::initializer_list<ArgumentID> args) const
+    {
+        for(const auto arg_id : args)
+        {
+            ARM_COMPUTE_UNUSED(arg_id);
+            ARM_COMPUTE_ERROR_ON_MSG(_kernel_arguments.find(arg_id) == _kernel_arguments.end() && _intermediate_tensors.find(arg_id) == _intermediate_tensors.end() && arg_id != g_arg_placeholder,
+                                     "Trying to use an argument that hasn't been added to the blueprint");
+        }
+    }
+
+    void add_component(ComponentUniquePtr component)
+    {
+        if(component->get_component_type() == ComponentType::Complex)
+        {
+            ++_num_complex_components;
+            ARM_COMPUTE_ERROR_ON_MSG(_num_complex_components > 1, "Only one complex component per blueprint is supported.");
+        }
+
+        // This flag specifies if the current component is the root of the component graph
+        // If the root is set to -1, it means that a root hasn't been added yet
+        bool is_graph_root = true;
+
+        // Get an unique ID for the component that's being added
+        const ComponentID component_id = _num_components++;
+        component->set_id(component_id);
+
+        // Add this component to the component graph. Don't connect it to anything yet
+        _component_graph.emplace(component_id, ComponentList{});
+
+        int32_t positional_arg = 0;
+
+        // For every { arg_id, arg_io } passed along with this component...
+        for(const auto &link : component->get_links())
+        {
+            const ArgumentID &arg_id = link.arg_id;
+            const SharedVarIO &arg_io = link.io;
+
+            // A component is considered root only if all its input arguments are kernel arguments (or placeholders, which means nullptr)
+            // This performs a check on every argument, and if one of them doesn't respect the condition, the component is not considered root
+            is_graph_root &= (_kernel_arguments.find(arg_id) != _kernel_arguments.end()) || (arg_io == SharedVarIO::Output) || (arg_id == g_arg_placeholder);
+
+            // Add the arg_id to the map describing the input/output relationship between an argument and the components that use it, if it doesn't yet exist there
+            if(_outgoing_components.find(arg_id) == _outgoing_components.end())
+            {
+                _outgoing_components.emplace(arg_id, ComponentList{});
+                _incoming_components.emplace(arg_id, ComponentList{});
+            }
+
+            // If it's an input argument, connect any other component that has it as output with this component
+            // Additionally, set this component as one that treats this argument as "Input" (append to index 0)
+            // This is used so that we keep track of whether two components use the same argument, one as input and one as output
+            if(arg_io == SharedVarIO::Input)
+            {
+                for(const auto &prev_component : _incoming_components[arg_id])
+                {
+                    _component_graph[prev_component].push_back(component_id);
+                }
+
+                _outgoing_components[arg_id].push_back(component_id);
+            }
+            // If it's an output argument, connect this component with any other component that has it as input
+            // Additionally, set this component as one that treats this argument as "Output" (append to index 1)
+            else
+            {
+                for(const auto &subseq_component : _outgoing_components[arg_id])
+                {
+                    _component_graph[component_id].push_back(subseq_component);
+                }
+
+                _incoming_components[arg_id].push_back(component_id);
+            }
+
+            ++positional_arg;
+        }
+
+        if(is_graph_root)
+        {
+            ARM_COMPUTE_ERROR_ON_MSG(_graph_root >= 0, "Trying to add more than one root to the graph");
+            _graph_root = component_id;
+        }
+
+        // Finally, add this component to the dictionary of components
+        _components.insert(std::make_pair(component_id, std::move(component)));
+    }
+
+    std::string build_kernel_name() const
+    {
+        std::string name = "";
+
+        auto stack = topological_sort();
+        while(!stack.empty())
+        {
+            name += _components.find(stack.top())->second->name() + (stack.size() > 2 ? "___" : ""); // NOTE(review): size() > 2 omits the "___" separator between the last two components — confirm this is intended
+            stack.pop();
+        }
+
+        // Debug print removed: library code must not write to std::cout (and <iostream> was never included here)
+        return name;
+    }
+
+    /** Assemble the complete OpenCL kernel source from all components in the blueprint.
+     *
+     * Components are visited in topological order; each one contributes its required
+     * headers, additional macros and code snippet, which are then stitched together
+     * with the kernel signature and the global-id setup section.
+     *
+     * @return The full kernel source code as a string
+     */
+    std::string build_code()
+    {
+        ARM_COMPUTE_ERROR_ON_MSG(_graph_root < 0, "No root found in the component graph");
+
+        // These data structures will hold the data from all the components in the blueprint
+        // Sets are used for headers/macros so each one is emitted only once even if
+        // several components request it
+        std::set<std::string>    headers_list{};
+        std::set<std::string>    additional_macros{};
+        std::vector<std::string> component_codes{}; // vector because order matters
+
+        // Go through the components graph (topological sort) and fill the data structures above
+        auto stack = topological_sort();
+        while(!stack.empty())
+        {
+            auto  curr_component_id = stack.top();
+            auto &curr_component    = _components.find(curr_component_id)->second;
+
+            auto       curr_headers_list      = curr_component->get_headers_list();
+            auto       curr_additional_macros = curr_component->get_additional_macros();
+            auto       curr_component_code    = curr_component->get_component_code();
+            const auto var_lut                = curr_component->allocate_vars(_vtable); // Ideally can be merged with get_component_code once we have finer-grained code generation technique
+            // Substitute the {{tag}} placeholders in the component code with the allocated variable names
+            component_codes.push_back(IClKernelComponent::replace_tags(curr_component_code, var_lut));
+
+            headers_list.insert(curr_headers_list.begin(), curr_headers_list.end());
+            if(!curr_additional_macros.empty()) // Some components might not have any
+            {
+                additional_macros.insert(curr_additional_macros);
+            }
+
+            stack.pop();
+        }
+
+        // This section assembles the data gathered by traversing the graph into the string "code"
+        std::string code = "";
+
+        for(auto &header : headers_list)
+        {
+            code += "#include \"" + header + "\"\n";
+        }
+
+        for(auto &macros : additional_macros)
+        {
+            code += macros;
+        }
+
+        // NOTE: allocate_vars() above must have run for every component before this point,
+        // since the signature is generated from the variables registered in _vtable
+        code += generate_kernel_signature(_vtable.get_kernel_arguments());
+
+        code += "\n{\n\n";
+
+        code += "    //------------------ START KERNEL_BUILDER_COORDINATE ---------------------\n\n";
+        code += generate_global_section();
+        code += "    //------------------ END KERNEL_BUILDER_COORDINATE ---------------------\n";
+
+        for(auto &component_code : component_codes)
+        {
+            code += component_code;
+        }
+
+        code += "}\n";
+
+        return code;
+    }
+
+    /** Configuration id of the fused kernel. Placeholder: not implemented yet. */
+    std::string build_config_id() const
+    {
+        return std::string{};
+    }
+
+    /** Build options for the fused kernel. Placeholder: returns an empty set for now. */
+    CLBuildOptions build_options() const
+    {
+        CLBuildOptions empty_opts{};
+        return empty_opts;
+    }
+
+    /** Execution window of the fused kernel. Placeholder: returns a default window for now. */
+    Window get_execution_window() const
+    {
+        Window win{};
+        return win;
+    }
+
+    /** Collect the runtime descriptors of all kernel arguments registered in the variable table */
+    ClKernelArgList get_arguments() const
+    {
+        ClKernelArgList args{};
+        for(const auto &shared_var : _vtable.get_kernel_arguments())
+        {
+            args.push_back(shared_var.desc);
+        }
+        return args;
+    }
+
+private:
+    /** Depth-first visit used by topological_sort(): a component is pushed on @p stack only
+     *  after all of its successors have been pushed, so popping the stack yields a
+     *  topological ordering of the component graph.
+     *
+     * @param[in]      component_id Component to visit
+     * @param[in, out] visited      Set of components already visited
+     * @param[in, out] stack        Output stack, filled in reverse topological order
+     */
+    void topological_sort_utility(ComponentID component_id, std::unordered_set<ComponentID> &visited, std::stack<ComponentID> &stack) const
+    {
+        visited.insert(component_id);
+
+        // A component with no outgoing edges may have no entry in the adjacency list:
+        // guard the lookup instead of dereferencing a possible end() iterator (UB)
+        const auto adjacency_it = _component_graph.find(component_id);
+        if(adjacency_it != _component_graph.end())
+        {
+            for(auto connected_component : adjacency_it->second)
+            {
+                if(visited.find(connected_component) == visited.end())
+                {
+                    topological_sort_utility(connected_component, visited, stack);
+                }
+            }
+        }
+
+        stack.push(component_id);
+    }
+
+    /** Return a stack whose top-to-bottom order is a topological ordering of the
+     *  component graph, starting the traversal from the graph root.
+     */
+    std::stack<ComponentID> topological_sort() const
+    {
+        std::unordered_set<ComponentID> visited{};
+        std::stack<ComponentID>         sorted{};
+
+        topological_sort_utility(_graph_root, visited, sorted);
+
+        return sorted;
+    }
+
+    /** Generate the OpenCL declaration snippet for a single kernel argument.
+     *
+     * The returned snippet deliberately has no trailing comma: the caller
+     * (generate_kernel_signature) appends the separator after each declaration.
+     *
+     * @param[in] var Shared variable to generate the declaration for; must belong to the Argument group
+     *
+     * @return The declaration code snippet
+     */
+    std::string generate_argument_declaration(const SharedVarTable::SharedVar &var) const
+    {
+        ARM_COMPUTE_ERROR_ON_MSG(var.group != SharedVarGroup::Argument, "An argument declaration can only be generated from a kernel argument");
+        std::string code;
+        switch(var.desc.tensor_arg_type)
+        {
+            case TensorArgType::Image:
+            {
+                code += "IMAGE_DECLARATION(" + var.uniq_name + ")";
+                break;
+            }
+            case TensorArgType::Image_3D:
+            {
+                code += "IMAGE_DECLARATION(" + var.uniq_name + "),\n";
+                code += "uint " + var.uniq_name + "_stride_z";
+                break;
+            }
+            case TensorArgType::Image_3D_Export_To_ClImage2D:
+            {
+                code += "__read_only image2d_t " + var.uniq_name + "_img,\n";
+                // No trailing comma here: the original emitted "...,\n", which produced a
+                // double comma in the generated kernel signature for this argument type
+                code += "uint " + var.uniq_name + "_stride_z";
+                break;
+            }
+            default:
+            {
+                ARM_COMPUTE_ERROR("Unsupported declaration generation for TensorArgType");
+            }
+        }
+        return code;
+    }
+
+    /** Assemble the full __kernel signature from the list of kernel arguments.
+     *
+     * @param[in] argument_list Arguments registered in the shared variable table
+     *
+     * @return The kernel signature, e.g. "\n__kernel void name(\n    IMAGE_DECLARATION(x))"
+     */
+    std::string generate_kernel_signature(const SharedVarTable::Arguments &argument_list) const
+    {
+        std::string code = "\n__kernel void " + build_kernel_name() + "(";
+
+        for(const auto &arg : argument_list)
+        {
+            code += "\n    " + generate_argument_declaration(arg) + ",";
+        }
+
+        // Drop the trailing comma (if any arguments were emitted) and close the parenthesis.
+        // The original overwrote the last character unconditionally, which replaced the
+        // opening '(' when the argument list was empty.
+        if(!argument_list.empty())
+        {
+            code.pop_back();
+        }
+        code += ")";
+
+        return code;
+    }
+
+    /** Generate the section of the kernel that reads the global ids and computes the
+     *  boundary-handling conditions (g_cond_x / g_cond_y) according to the clipping strategy.
+     *
+     * @return The global section code snippet
+     */
+    std::string generate_global_section() const
+    {
+        std::string code = "    uint g_x = get_global_id(0);\n";
+        code += "    uint g_y = get_global_id(1);\n";
+        code += "    uint g_z = get_global_id(2);\n\n";
+
+        // Fall back to 1x1 tiles when no tile info was provided
+        size_t tile_dim_x = _tile_info.empty() ? 1 : _tile_info.tile_dims.x();
+        size_t tile_dim_y = _tile_info.empty() ? 1 : _tile_info.tile_dims.y();
+
+        // NOTE(review): the non-TOP_LEFT branches read _tile_info.boundaries without an
+        // empty() guard — presumably an empty _tile_info always selects TOP_LEFT; confirm.
+        switch(_tile_info.clipping)
+        {
+            case ClippingStrategy::TOP_LEFT:
+                code += "    const bool g_cond_x = (g_x == 0);\n";
+                code += "    const bool g_cond_y = (g_y == 0);\n";
+                break;
+            case ClippingStrategy::TOP_RIGHT:
+                code += "    const bool g_cond_x = ((g_x + 1) * " + std::to_string(tile_dim_x) + " >= " + std::to_string(_tile_info.boundaries.x()) + ");\n";
+                code += "    const bool g_cond_y = (g_y == 0);\n";
+                break;
+            case ClippingStrategy::BOTTOM_LEFT:
+                code += "    const bool g_cond_x = (g_x == 0);\n";
+                code += "    const bool g_cond_y = ((g_y + 1) * " + std::to_string(tile_dim_y) + " >= " + std::to_string(_tile_info.boundaries.y()) + ");\n";
+                break;
+            case ClippingStrategy::BOTTOM_RIGHT:
+                code += "    const bool g_cond_x = ((g_x + 1) * " + std::to_string(tile_dim_x) + " >= " + std::to_string(_tile_info.boundaries.x()) + ");\n";
+                code += "    const bool g_cond_y = ((g_y + 1) * " + std::to_string(tile_dim_y) + " >= " + std::to_string(_tile_info.boundaries.y()) + ");\n";
+                break;
+            default:
+                ARM_COMPUTE_ERROR("Unsupported clipping strategy");
+        }
+
+        // Zero-initialized index variables used by the components' LOAD/STORE macros
+        code += "\n    REPEAT_VAR_INIT_TO_CONST(M0, uint, g_zout, 0);\n";
+        code += "    REPEAT_VAR_INIT_TO_CONST(16, uint, g_zero, 0);\n\n";
+
+        return code;
+    }
+
+    // Tile sizes, boundaries and clipping strategy used when generating the global section
+    TileDescriptor _tile_info{};
+
+    // Running counters used to generate unique IDs for arguments and components
+    int32_t _num_args{};
+    int32_t _num_components{};
+    int32_t _num_complex_components{};
+
+    // Argument, components and intermediate tensors IDs with corresponding ptrs (except intermediate)
+    std::unordered_map<ComponentID, ComponentUniquePtr> _components{};
+    std::unordered_map<ArgumentID, ClTensorDescriptor>  _kernel_arguments{};
+    std::unordered_set<ArgumentID> _intermediate_tensors{};
+    // Argument group lookup. Can be replaced by extending the ArgumentID type to include group info
+    std::unordered_map<ArgumentID, SharedVarGroup> _shared_var_group_lut{};
+
+    // Tracks all variables (e.g.: kernel arguments, kernel "global variables")
+    SharedVarTable _vtable{};
+
+    // Component directed graph (represented by an adjacency list of Component IDs)
+    // This is used to understand the ordering and bindings between components when generating the kernel
+    // It's initially set to -1 which means the graph has no root yet, since node IDs are positive numbers
+    ComponentID _graph_root{ -1 };
+    std::unordered_map<ComponentID, ComponentList> _component_graph{};
+
+    // Additional data structures used to define the relationships between components and arguments
+    // For each argument, it contains the list of components that consider it as an incoming or an outgoing argument
+    // E.g. tensor0  -> component0 -> tensor1
+    // _outgoing_components[tensor0] == {component0} (component0 is the outgoing component of tensor0. Component0 treats tensor0 as an input tensor)
+    // _incoming_components[tensor1] == {component0} (component0 is the incoming component of tensor1. Component0 treats tensor1 as an output tensor)
+    std::unordered_map<ArgumentID, ComponentList> _outgoing_components{};
+    std::unordered_map<ArgumentID, ComponentList> _incoming_components{};
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif //ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMMON_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Utils.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Utils.h
new file mode 100644
index 0000000..41ab4e3
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Utils.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_UTILS
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_UTILS
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingAPI.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Stream a set of CL build options, each followed by a comma */
+inline ::std::ostream &operator<<(::std::ostream &os, const CLBuildOptions::StringSet &build_opts)
+{
+    for(const auto &option : build_opts)
+    {
+        os << option << ",";
+    }
+    return os;
+}
+/** Stream all the options of a CLBuildOptions object */
+inline ::std::ostream &operator<<(::std::ostream &os, const CLBuildOptions &cl_build_opts)
+{
+    return os << cl_build_opts.options();
+}
+
+/** Convert a CLBuildOptions object to its string representation */
+inline std::string to_string(const CLBuildOptions &cl_build_opts)
+{
+    std::stringstream ss;
+    ss << cl_build_opts;
+    return ss.str();
+}
+/** Stream the name, source code and build options of a ClKernelCode object */
+inline ::std::ostream &operator<<(::std::ostream &os, const ClKernelCode &code)
+{
+    // '\n' instead of std::endl: no need to flush the stream after every line
+    os << "name: " << code.name << '\n';
+    os << "code: " << code.code << '\n';
+    os << "build_opts: " << code.build_options << '\n';
+    return os;
+}
+/** Convert a ClKernelCode object to its string representation */
+inline std::string to_string(const ClKernelCode &code)
+{
+    std::stringstream ss;
+    ss << code;
+    return ss.str();
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif //ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_UTILS
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.cpp b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.cpp
new file mode 100644
index 0000000..a44b5fa
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Component type used by the kernel builder to schedule this component */
+ComponentType ClElementwiseAddKernelComponent::get_component_type() const
+{
+    return ComponentType::Simple;
+}
+
+/** CL helper headers required by this component's code snippet */
+std::set<std::string> ClElementwiseAddKernelComponent::get_headers_list() const
+{
+    return { "gemm_helpers.h", "repeat.h" };
+}
+
+/** Code snippet performing the in-place, boundary-aware elementwise addition.
+ *
+ * The {{tag}} placeholders are substituted by the kernel builder with the variable
+ * names allocated in allocate_vars().
+ */
+std::string ClElementwiseAddKernelComponent::get_component_code() const
+{
+    // Removed an unused local "std::string code;" the original declared and never used
+    return R"_(
+    //------------------ START KERNEL {{meta_kernel_id}} ELTWISE_ADD ---------------------
+    // IN_0(Accumulator)   {{acc}}
+    // IN_1(Addend)                {{addend}}
+
+    // c = addend + c (mix-precision, broadcast, boundary aware)
+    {
+        __global uchar *addend_addr = {{addend}}_ptr + {{addend}}_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(g_y, M0, PARTIAL_STORE_M0) * {{addend}}_stride_y) + get_global_id(2) * {{addend}}_stride_z; \
+        LOAD_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, addend, addend_addr, 0, {{addend}}_stride_y, g_zero, PARTIAL_LOAD_M0, PARTIAL_LOAD_N0, PARTIAL_COND_Y, PARTIAL_COND_X);                                                                                        \
+        MIXED_PRECISION_ELTWISE_OP_BLOCK(ADD_X_POS_0, M0, N0, {{acc}}, addend, DATA_TYPE_ACCUMULATOR, addend_hp);
+    }
+    //------------------ END KERNEL {{meta_kernel_id}} ELTWISE_ADD ---------------------
+
+)_";
+}
+/** Allocate the shared variables of this component in @p vtable and build the tag
+ *  look-up table used to substitute the {{tag}} placeholders in the component code.
+ *
+ * The operand in the Automatic group is the accumulator (produced by a previous
+ * component, added to in place); the other operand is the addend.
+ */
+ClElementwiseAddKernelComponent::TagLUT ClElementwiseAddKernelComponent::allocate_vars(SharedVarTable &vtable) const
+{
+    // Determine which argument is the accumulator
+    Link accumulator;
+    Link addend;
+    if(_lhs.group == SharedVarGroup::Automatic)
+    {
+        accumulator = _lhs;
+        addend      = _rhs;
+    }
+    else if(_rhs.group == SharedVarGroup::Automatic)
+    {
+        accumulator = _rhs;
+        addend      = _lhs;
+    }
+    else
+    {
+        // Neither operand is an intermediate tensor: this component cannot be fused stand-alone
+        ARM_COMPUTE_ERROR("Invalid elementwise component linking");
+    }
+    return {
+        { "meta_kernel_id", id() },
+        { "acc", vtable.add(accumulator, ClKernelArgRuntimeDescriptor(accumulator.arg_id, TensorArgType::Image_3D), "add_acc") },
+        { "addend", vtable.add(addend, ClKernelArgRuntimeDescriptor(addend.arg_id, TensorArgType::Image_3D), "add_addend") },
+        // {"dst", vtable.add(_dst, ClKernelArgRuntimeDescriptor(_dst.arg_id, TensorArgType::Image_3D), "dst")}, // dst is needed for the root version and/or non-inplace version should we need one
+    };
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.h
new file mode 100644
index 0000000..c0de4ac
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLELEMENTWISEADDKERNELCOMPONENT_H
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLELEMENTWISEADDKERNELCOMPONENT_H
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Kernel component performing an elementwise addition between two operands.
+ *
+ * The addition is performed in place on the operand that belongs to the Automatic
+ * (intermediate) variable group; see allocate_vars() in the .cpp for the details.
+ */
+class ClElementwiseAddKernelComponent : public IClKernelComponent
+{
+public:
+    /** Constructor
+     *
+     * @param[in] lhs First addend
+     * @param[in] rhs Second addend
+     * @param[in] dst Destination link (currently unused by allocate_vars; kept for a future non-inplace version)
+     */
+    ClElementwiseAddKernelComponent(const Link &lhs, const Link &rhs, const Link &dst)
+        : _lhs{ lhs }, _rhs{ rhs }, _dst{ dst }
+    {
+    }
+    ComponentType         get_component_type() const override;
+    std::set<std::string> get_headers_list() const override;
+    std::string           get_component_code() const override;
+
+    /** Links (operands) this component is connected to, in lhs/rhs/dst order */
+    virtual std::vector<Link> get_links() const override
+    {
+        return { _lhs, _rhs, _dst };
+    }
+
+    virtual TagLUT allocate_vars(SharedVarTable &vtable) const override;
+
+    /** Unique component name, suffixed with the component id */
+    virtual std::string name() const override
+    {
+        return "eltwise_add_" + std::to_string(id());
+    }
+
+private:
+    Link _lhs{};
+    Link _rhs{};
+    Link _dst{};
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif // ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLELEMENTWISEADDKERNELCOMPONENT_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.cpp b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.cpp
new file mode 100644
index 0000000..1521973
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.cpp
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Component type used by the kernel builder to schedule this component */
+ComponentType ClGemmNativeKernelComponent::get_component_type() const
+{
+    return ComponentType::Complex;
+}
+
+/** CL helper headers required by this component's code snippet */
+std::set<std::string> ClGemmNativeKernelComponent::get_headers_list() const
+{
+    return { "./common/experimental/gemm_fused_post_ops/act_eltwise_op_act/fp_post_ops_act_eltwise_op_act.h", "gemm_helpers.h", "repeat.h" };
+}
+
+/** Additional OpenCL macros required by the GEMM native code snippet.
+ *
+ * Defines VFMA and the RHS_VFMA_M0xN0 macro, which performs a fused multiply-add of
+ * one RHS row against M0 LHS rows; one variant is selected per supported M0 (1..8).
+ * The string is emitted once at the top of the fused kernel, before the signature.
+ */
+std::string ClGemmNativeKernelComponent::get_additional_macros() const
+{
+    return R"_(
+#define VFMA(a, b, c) \
+({                    \
+    c = fma(a, b, c); \
+})
+
+#if M0 == 1
+#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
+    ({                                                                \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+    })
+#elif M0 == 2 // M0 == 2
+#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
+    ({                                                                \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+    })
+#elif M0 == 3 // M0 == 3
+#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
+    ({                                                                \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
+    })
+#elif M0 == 4 // M0 == 4
+#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
+    ({                                                                \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
+    })
+#elif M0 == 5 // M0 == 5
+#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
+    ({                                                                \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
+    })
+#elif M0 == 6 // M0 == 6
+#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
+    ({                                                                \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
+    })
+#elif M0 == 7 // M0 == 7
+#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
+    ({                                                                \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##6).s##i), b, (c##6)); \
+    })
+#elif M0 == 8 // M0 == 8
+#define RHS_VFMA_M0xN0(i, a, b, c)                                    \
+    ({                                                                \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##0).s##i), b, (c##0)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##1).s##i), b, (c##1)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##2).s##i), b, (c##2)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##3).s##i), b, (c##3)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##4).s##i), b, (c##4)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##5).s##i), b, (c##5)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##6).s##i), b, (c##6)); \
+        VFMA((VEC_DATA_TYPE(DATA_TYPE, N0))((a##7).s##i), b, (c##7)); \
+    })
+#else // M0 not supported
+#error "M0 not supported"
+#endif // M0 not supported
+)_";
+}
+
+/** Code snippet computing a native (non-reshaped) GEMM block, with optional bias.
+ *
+ * The {{tag}} placeholders are substituted by the kernel builder with the variable
+ * names allocated in allocate_vars(). The bias sections are appended only when a
+ * bias link was provided at construction time.
+ */
+std::string ClGemmNativeKernelComponent::get_component_code() const
+{
+    std::string code = R"_(
+    //------------------ START KERNEL {{meta_kernel_id}} ---------------------
+    // IN_0(lhs)            {{lhs}}
+    // IN_1(rhs)            {{rhs}}
+    )_";
+
+    if(!_bias.is_empty())
+    {
+        code += R"_(
+    // IN_2(bias)           {{bias}}
+    )_";
+    }
+
+    code += R"_(
+    // OUT(dst, accum)      {{dst}}
+
+    // Initialize the accumulators
+    REPEAT_VAR_INIT_TO_CONST(M0, VEC_DATA_TYPE(DATA_TYPE, N0), {{dst}}, 0); //VEC_DATA_TYPE(DATA_TYPE, N0)    c0=0,c1=0,c2=0,... c(M0-1)=0;
+    {
+#if defined(DUMMY_WORK_ITEMS)
+        if((g_x * N0 >= N) || (g_y * M0 >= M))
+        {
+            return;
+        }
+#endif // defined(DUMMY_WORK_ITEMS)
+
+        // Compute LHS matrix address
+        uint lhs_offset = {{lhs}}_offset_first_element_in_bytes + COMPUTE_M0_START_ROW(g_y, M0, PARTIAL_STORE_M0) * (uint){{lhs}}_stride_y;
+
+        // Compute RHS matrix address
+        uint rhs_offset = {{rhs}}_offset_first_element_in_bytes + g_x * N0 * sizeof(DATA_TYPE);
+
+#if defined(MATRIX_B_DEPTH)
+        // Do not slide matrix B if the matrix B has 3 dimensions and matrix A more than 3
+        rhs_offset += (g_z % MATRIX_B_DEPTH) * {{rhs}}_stride_z;
+#else  // defined(MATRIX_B_DEPTH)
+        rhs_offset += g_z * {{rhs}}_stride_z;
+#endif // defined(MATRIX_B_DEPTH)
+
+        REPEAT_VAR_INIT_TO_CONST(M0, uint, zlhs, 0);
+
+#if defined(REINTERPRET_INPUT_AS_3D)
+        // The plane (zlhs) is calculated dividing M (g_y * M0) by HEIGHT_GEMM3D
+        CALCULATE_Z_OFFSET(M0, uint, zlhs, COMPUTE_M0_START_ROW(g_y, M0, PARTIAL_STORE_M0), HEIGHT_GEMM3D, DEPTH_GEMM3D, {{lhs}}_cross_plane_pad, {{lhs}}_stride_y);
+
+        // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
+        // multiply lhs_stride_z by DEPTH_GEMM3D
+        lhs_offset += g_z * {{lhs}}_stride_z * DEPTH_GEMM3D;
+
+#else // defined(REINTERPRET_INPUT_AS_3D)
+
+        // Add offset for batched GEMM
+        lhs_offset += g_z * {{lhs}}_stride_z;
+
+#endif // defined(REINTERPRET_INPUT_AS_3D)
+
+        int i = 0;
+#if K0 > 1
+        for(; i <= (K - K0); i += K0)
+        {
+            // Supported cases (M0, K0):
+            // 1,2 - 1,3 - 1,4 - 1,8 - 1,16
+            // 2,2 - 2,3 - 2,4 - 2,8 - 2,16
+            // 3,2 - 3,3 - 3,4 - 3,8 - 3,16
+            // 4,2 - 4,3 - 4,4 - 4,8 - 4,16
+            // 5,2 - 5,3 - 5,4 - 5,8 - 5,16
+            // 6,2 - 6,3 - 6,4 - 6,8 - 6,16
+            // 7,2 - 7,3 - 7,4 - 7,8 - 7,16
+            // 8,2 - 8,3 - 8,4 - 8,8 - 8,16
+            // Load values from LHS matrix
+            LOAD_BLOCK(M0, K0, DATA_TYPE, a, {{lhs}}_ptr, lhs_offset, {{lhs}}_stride_y, zlhs);
+
+            // Load values from RHS matrix
+            LOAD_BLOCK(K0, N0, DATA_TYPE, b, {{rhs}}_ptr, rhs_offset, {{rhs}}_stride_y, g_zero);
+
+            RHS_VFMA_M0xN0(0, a, b0, {{dst}});
+            RHS_VFMA_M0xN0(1, a, b1, {{dst}});
+#if K0 > 2
+            RHS_VFMA_M0xN0(2, a, b2, {{dst}});
+#endif // K0 > 2
+#if K0 > 3
+            RHS_VFMA_M0xN0(3, a, b3, {{dst}});
+#endif // K0 > 3
+#if K0 > 4
+            RHS_VFMA_M0xN0(4, a, b4, {{dst}});
+            RHS_VFMA_M0xN0(5, a, b5, {{dst}});
+            RHS_VFMA_M0xN0(6, a, b6, {{dst}});
+            RHS_VFMA_M0xN0(7, a, b7, {{dst}});
+#endif // K0 > 4
+#if K0 > 8
+            RHS_VFMA_M0xN0(8, a, b8, {{dst}});
+            RHS_VFMA_M0xN0(9, a, b9, {{dst}});
+            RHS_VFMA_M0xN0(A, a, bA, {{dst}});
+            RHS_VFMA_M0xN0(B, a, bB, {{dst}});
+            RHS_VFMA_M0xN0(C, a, bC, {{dst}});
+            RHS_VFMA_M0xN0(D, a, bD, {{dst}});
+            RHS_VFMA_M0xN0(E, a, bE, {{dst}});
+            RHS_VFMA_M0xN0(F, a, bF, {{dst}});
+#endif // K0 > 8
+
+            lhs_offset += K0 * sizeof(DATA_TYPE);
+            rhs_offset += K0 * {{rhs}}_stride_y;
+        }
+#endif // K0 > 1
+        // Left-over accumulations
+        for(; i < K; ++i)
+        {
+            // Load values from LHS matrix
+            VEC_DATA_TYPE(DATA_TYPE, 2)
+            a0 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 0 * {{lhs}}_stride_y + zlhs0));
+#if M0 > 1
+            VEC_DATA_TYPE(DATA_TYPE, 2)
+            a1 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 1 * {{lhs}}_stride_y + zlhs1));
+#endif // M0 > 1
+#if M0 > 2
+            VEC_DATA_TYPE(DATA_TYPE, 2)
+            a2 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 2 * {{lhs}}_stride_y + zlhs2));
+#endif // M0 > 2
+#if M0 > 3
+            VEC_DATA_TYPE(DATA_TYPE, 2)
+            a3 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 3 * {{lhs}}_stride_y + zlhs3));
+#endif // M0 > 3
+#if M0 > 4
+            VEC_DATA_TYPE(DATA_TYPE, 2)
+            a4 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 4 * {{lhs}}_stride_y + zlhs4));
+#endif // M0 > 4
+#if M0 > 5
+            VEC_DATA_TYPE(DATA_TYPE, 2)
+            a5 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 5 * {{lhs}}_stride_y + zlhs5));
+#endif // M0 > 5
+#if M0 > 6
+            VEC_DATA_TYPE(DATA_TYPE, 2)
+            a6 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 6 * {{lhs}}_stride_y + zlhs6));
+#endif // M0 > 6
+#if M0 > 7
+            VEC_DATA_TYPE(DATA_TYPE, 2)
+            a7 = *((__global DATA_TYPE *)({{lhs}}_ptr + lhs_offset + 7 * {{lhs}}_stride_y + zlhs7));
+#endif // M0 > 7
+
+            VEC_DATA_TYPE(DATA_TYPE, N0)
+            b = VLOAD(N0)(0, (__global DATA_TYPE *)({{rhs}}_ptr + rhs_offset + 0 * {{rhs}}_stride_y));
+            RHS_VFMA_M0xN0(0, a, b, {{dst}});
+
+            lhs_offset += sizeof(DATA_TYPE);
+            rhs_offset += {{rhs}}_stride_y;
+        }
+
+        // Multiply by the weight of matrix-matrix product and store the result
+#if defined(ALPHA)
+        SCALE_BLOCK(M0, DATA_TYPE, {{dst}}, ALPHA);
+#endif // defined(ALPHA)
+    )_";
+
+    if(!_bias.is_empty())
+    {
+        code += R"_(
+        // Add beta*bias
+#if defined(BROADCAST_BIAS)
+        __global uchar *bias_addr = {{bias}}_ptr + {{bias}}_offset_first_element_in_bytes + (get_global_id(0) * (uint)N0 * sizeof(DATA_TYPE));
+
+        LOAD_BLOCK(1, N0, DATA_TYPE, bias, bias_addr, 0, {{bias}}_stride_y, g_zero);
+
+#ifndef UNIT_BETA
+        SCALE_BLOCK(1, DATA_TYPE, bias, BETA);
+#endif // UNIT_BIAS
+
+        // c = c + bias[broadcasted]
+        ADD_BLOCK_BROADCAST(M0, {{dst}}, bias0);
+
+#else // defined(BROADCAST_BIAS)
+        __global uchar *bias_addr = {{bias}}_ptr + {{bias}}_offset_first_element_in_bytes + (g_x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(g_y, M0,
+                                    PARTIAL_STORE_M0)
+                                    * {{bias}}_stride_y)
+                                    + g_z * {{bias}}_stride_z;
+
+        LOAD_BLOCK(M0, N0, DATA_TYPE, bias, bias_addr, 0, {{bias}}_stride_y, g_zero);
+
+#ifndef UNIT_BETA
+        SCALE_BLOCK(M0, DATA_TYPE, bias, BETA);
+#endif // UNIT_BIAS
+
+        // c = c + bias
+        ADD_BLOCK(M0, {{dst}}, bias);
+
+#endif // defined(BROADCAST_BIAS)
+    )_";
+    }
+
+    code += R"_(
+    }
+    //------------------ END KERNEL {{meta_kernel_id}} ---------------------
+    )_";
+    // Return the string directly: the original "return code.c_str();" forced a
+    // const char* round-trip, which copies the buffer and defeats NRVO
+    return code;
+}
+
+/** Allocate the shared variables of this component in @p vtable and build the tag
+ *  look-up table used to substitute the {{tag}} placeholders in the component code.
+ *
+ * Note: the vtable.add() calls register the variables in order (lhs, rhs, [bias], dst),
+ * which determines the order of the generated kernel arguments.
+ */
+ClGemmNativeKernelComponent::TagLUT ClGemmNativeKernelComponent::allocate_vars(SharedVarTable &vtable) const
+{
+    TagLUT lut{};
+
+    lut["meta_kernel_id"] = id();
+    lut["lhs"]            = vtable.add(_lhs, ClKernelArgRuntimeDescriptor(_lhs.arg_id, TensorArgType::Image_3D), "lhs");
+    lut["rhs"]            = vtable.add(_rhs, ClKernelArgRuntimeDescriptor(_rhs.arg_id, TensorArgType::Image_3D), "rhs");
+    if(!_bias.is_empty()) // optional bias
+    {
+        lut["bias"] = vtable.add(_bias, ClKernelArgRuntimeDescriptor(_bias.arg_id, TensorArgType::Image_3D), "bias");
+    }
+    lut["dst"] = vtable.add(_dst, ClKernelArgRuntimeDescriptor(_dst.arg_id, TensorArgType::Image_3D), "dst");
+    return lut;
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.h
new file mode 100644
index 0000000..38f007c
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLGEMMNATIVEKERNELCOMPONENT_H
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLGEMMNATIVEKERNELCOMPONENT_H
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Kernel component emitting the OpenCL code section for a native GEMM,
+ *  with an optional bias operand (an empty @ref Link means no bias).
+ */
+class ClGemmNativeKernelComponent : public IClKernelComponent
+{
+public:
+    /** Constructor
+     *
+     * @param[in] lhs  Link to the left-hand-side matrix operand
+     * @param[in] rhs  Link to the right-hand-side matrix operand
+     * @param[in] dst  Link to the destination tensor
+     * @param[in] bias (Optional) Link to the bias operand; defaults to an empty link (no bias)
+     */
+    ClGemmNativeKernelComponent(const Link &lhs, const Link &rhs, const Link &dst, const Link &bias = Link{})
+        : _lhs{ lhs }, _rhs{ rhs }, _bias{ bias }, _dst{ dst }
+    {
+    }
+    ComponentType         get_component_type() const override;
+    std::set<std::string> get_headers_list() const override;
+    std::string           get_additional_macros() const override;
+    std::string           get_component_code() const override;
+    // NOTE(review): not const and not an override of IClKernelComponent — confirm this is intended
+    ClKernelArgList       get_args();
+
+    /** All operand links of this component; includes the bias link even when it is empty —
+     *  presumably empty links are handled by the consumer; verify against callers. */
+    virtual std::vector<Link> get_links() const override
+    {
+        return { _lhs, _rhs, _bias, _dst };
+    }
+
+    virtual TagLUT allocate_vars(SharedVarTable &vtable) const override;
+
+    /** Unique kernel name for this component instance */
+    virtual std::string name() const override
+    {
+        return "gemm_mm_native_" + std::to_string(id());
+    }
+
+private:
+    Link _lhs{};
+    Link _rhs{};
+    Link _bias{}; // empty link when no bias is fused
+    Link _dst{};
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif // ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLGEMMNATIVEKERNELCOMPONENT_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClKernelComponents.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClKernelComponents.h
new file mode 100644
index 0000000..b751ce2
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClKernelComponents.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_CLKERNELCOMPONENTS_H
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_CLKERNELCOMPONENTS_H
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClElementwiseAddKernelComponent.h"
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClGemmNativeKernelComponent.h"
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.h"
+
+#endif //ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_CLKERNELCOMPONENTS_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp
new file mode 100644
index 0000000..430fafb
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** This component performs the store of the result block to the destination tensor */
+ComponentType ClStoreBlockBoundaryAwareKernelComponent::get_component_type() const
+{
+    return ComponentType::Store;
+}
+
+/** Return the OpenCL code template for the boundary-aware store: it computes the
+ *  destination address (covering both 3D-reinterpreted and plain batched outputs)
+ *  and stores the source block, handling partial blocks at the M/N edges.
+ *  The {{...}} tags are substituted using the TagLUT produced by allocate_vars().
+ */
+std::string ClStoreBlockBoundaryAwareKernelComponent::get_component_code() const
+{
+    return R"_(
+    //------------------ START KERNEL {{meta_kernel_id}} STORE ---------------------
+
+    __global uchar *dst_addr = {{dst}}_ptr + {{dst}}_offset_first_element_in_bytes + (g_x * (uint)N0 * sizeof(DATA_TYPE)) + (COMPUTE_M0_START_ROW(g_y, M0, PARTIAL_STORE_M0) * {{dst}}_stride_y);
+
+#if defined(REINTERPRET_OUTPUT_AS_3D)
+    // Add offset for batched GEMM. The batches will be in the fourth dimension and for this reason we
+    // multiply dst_stride_z by DEPTH_GEMM3D
+    dst_addr += g_z * {{dst}}_stride_z * DEPTH_GEMM3D;
+
+#else // defined(REINTERPRET_OUTPUT_AS_3D)
+
+    // Add offset for batched GEMM
+    dst_addr += g_z * {{dst}}_stride_z;
+
+#endif // defined(REINTERPRET_OUTPUT_AS_3D)
+
+    STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, {{src}}, dst_addr, {{dst}}_stride_y, g_zout, PARTIAL_STORE_M0, PARTIAL_STORE_N0, g_cond_y, g_cond_x);
+
+    //------------------ END KERNEL {{meta_kernel_id}} STORE ---------------------
+
+)_";
+}
+/** Build the tag look-up table for the store component's code template, registering
+ *  src and dst in the shared variable table as 3D image kernel arguments.
+ *
+ * @param[in,out] vtable Shared variable table the operands are registered into
+ *
+ * @return TagLUT mapping placeholder tags to their runtime values
+ */
+ClStoreBlockBoundaryAwareKernelComponent::TagLUT ClStoreBlockBoundaryAwareKernelComponent::allocate_vars(SharedVarTable &vtable) const
+{
+    return {
+        { "meta_kernel_id", id() },
+        { "src", vtable.add(_src, ClKernelArgRuntimeDescriptor(_src.arg_id, TensorArgType::Image_3D), "src") },
+        { "dst", vtable.add(_dst, ClKernelArgRuntimeDescriptor(_dst.arg_id, TensorArgType::Image_3D), "dst") },
+    };
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file
diff --git a/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.h b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.h
new file mode 100644
index 0000000..f0d01d3
--- /dev/null
+++ b/src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/components/ClStoreKernelComponents.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#if defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
+
+#ifndef ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLSTOREKERNELCOMPONENTS_H
+#define ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLSTOREKERNELCOMPONENTS_H
+
+#include "src/core/experimental/dynamic_fusion/ClKernelBuildingImpl/Common.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+/** Kernel component emitting the OpenCL code section that stores the result block
+ *  to the destination tensor, with boundary-aware (partial block) handling.
+ */
+class ClStoreBlockBoundaryAwareKernelComponent : public IClKernelComponent
+{
+public:
+    /** Constructor
+     *
+     * @param[in] src Link to the tensor holding the block to store
+     * @param[in] dst Link to the destination tensor
+     */
+    ClStoreBlockBoundaryAwareKernelComponent(const Link &src, const Link &dst)
+        : _src{ src }, _dst{ dst }
+    {
+    }
+    ComponentType get_component_type() const override;
+    std::string   get_component_code() const override;
+
+    virtual std::vector<Link> get_links() const override
+    {
+        return { _src, _dst };
+    }
+
+    virtual TagLUT allocate_vars(SharedVarTable &vtable) const override;
+
+    // NOTE(review): returns an empty name, unlike other components — presumably store
+    // components do not contribute to the fused kernel name; confirm this is intended
+    virtual std::string name() const override
+    {
+        return "";
+    }
+
+private:
+    Link _src{};
+    Link _dst{};
+};
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+#endif // ARM_COMPUTE_EXPERIMENTAL_DYNAMICFUSION_IMPL_COMPONENTS_CLSTOREKERNELCOMPONENTS_H
+
+#endif // defined(ENABLE_EXPERIMENTAL_DYNAMIC_FUSION)
\ No newline at end of file