Add output operator for dynamic fusion

* The output of the fused operator must be explicitly specified
  using the GpuOutput operator, as shown in the sketch below.
* Any temporary tensor used to connect the output of one operator
  to the input of another is marked as no-alloc and is never
  allocated in memory; it exists only as a temporary variable in
  the generated kernel.
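
A minimal usage sketch (illustrative only: the sketch and tensor
setup are assumed and are not part of this patch; only validate_op
and create_op below are the new API). Here "interm" is the
destination info of a previously fused operator and "dst" is the
user-visible output:

    // interm only connects two operators inside the sketch, so the
    // component graph assigns it MemoryType::NoAlloc and it is never
    // materialized in memory.
    ITensorInfo *interm = /* dst info of the previously fused operator */;
    ITensorInfo *dst    = /* user-provided output tensor info */;

    // The output of the fused workload must be specified explicitly:
    ARM_COMPUTE_ERROR_THROW_ON(GpuOutput::validate_op(sketch, interm, dst));
    GpuOutput::create_op(sketch, interm, dst);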

Resolves: COMPMID-5771
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I5ae8e800f8f737db23a055a92b01c4f1d78c3bb8
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8794
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp b/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
index 36168d1..7e427fe 100644
--- a/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
+++ b/src/dynamic_fusion/runtime/gpu/cl/ClWorkloadRuntime.cpp
@@ -132,10 +132,11 @@
             ARM_COMPUTE_ERROR_ON(tensor_info.id() != t_id);
             const auto aux_memory_info = workload_arg->memory_descriptor()->aux_memory_info;
             tensor_object              = aux_tensors->add_aux_tensor(tensor_info, aux_memory_info);
-        }
-        if(tensor_object == nullptr)
-        {
-            return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Failed to construct an auxiliary tensor");
+
+            if(tensor_object == nullptr)
+            {
+                return ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Failed to construct an auxiliary tensor");
+            }
         }
     }
     return Status{};
diff --git a/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp b/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp
index 6e6422c..1f90aab 100644
--- a/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp
+++ b/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp
@@ -42,15 +42,24 @@
  */
 MemoryDescriptorMap assign_memory_descriptors(const std::map<ITensorInfo::Id, const ITensorInfo *> tensors, const DependencyGraph &graph)
 {
+    const auto all_tensors = graph.all_tensors();
+    const auto src_tensors = graph.global_src_tensors();
+    const auto dst_tensors = graph.global_dst_tensors();
+    const auto interm_tensors = graph.intermediate_tensors();
+
     MemoryDescriptorMap mem_map{};
-    for(auto t_id : graph.all_tensors())
+    for(auto t_id : all_tensors)
     {
         const auto &tensor = tensors.at(t_id);
         // Only global src and dst tensors to the entire component graph are "User" tensors, which are user-specified memories
-        if(is_in(t_id, graph.global_src_tensors()) || is_in(t_id, graph.global_dst_tensors()))
+        if(is_in(t_id, src_tensors) || is_in(t_id, dst_tensors))
         {
             mem_map[t_id] = MemoryDescriptor{ MemoryType::User };
         }
+        else if(is_in(t_id, interm_tensors))
+        {
+            mem_map[t_id] = MemoryDescriptor{ MemoryType::NoAlloc };
+        }
         else
         {
             AuxMemoryInfo aux_mem_info{ tensor->total_size() };
diff --git a/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.cpp b/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.cpp
index c560e9a..66760b3 100644
--- a/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.cpp
+++ b/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.cpp
@@ -40,7 +40,6 @@
 GpuLogicalKernel::GpuLogicalKernel(GpuComponentServices *services, const GpuKernelComponentGroup &components)
     : _services{ services }, _comp_group{ components }, _store_components{}
 {
-    add_load_store();
 }
 
 GpuKernelSourceCode GpuLogicalKernel::write_kernel_code()
@@ -57,32 +56,6 @@
 
     return code;
 }
-
-void GpuLogicalKernel::add_load_store()
-{
-    const auto dst_tensors = _comp_group.get_dst_tensors();
-    // Each dst tensor from the component group requires exactly one store component
-    for(const auto &dst_tensor : dst_tensors)
-    {
-        ArgumentPack<ITensorInfo> tensors;
-        // Pass same destination tensor to both source and destination of the store component
-        // In other words, the addition of a store component does not create a new dst tensor
-        // This way we avoid the issue of the dst tensor of the component group differs from that of a logical kernel
-        // This may seem to violate the acyclic-ness of the component graph. But it is fine because at the point of
-        // the construction of the logical kernel, we do not need a graph representation of components anymore
-        // (the graph has been serialized)
-        tensors.add_const_tensor(ACL_SRC_0, dst_tensor);
-        tensors.add_const_tensor(ACL_DST_0, dst_tensor);
-
-        auto store = _services->component_factory().create<ClComponentStore>(
-                         _comp_group.get_root_component()->properties(), // Store component share the same properties as that of the root component
-                         tensors);
-        _store_components.push_back(std::move(store));
-        auto success = _comp_group.add_component(_store_components.back().get());
-        ARM_COMPUTE_UNUSED(success);
-        ARM_COMPUTE_ERROR_ON(!success); // It's guaranteed that any load store insertion should be successful
-    }
-}
 } // namespace dynamic_fusion
 } // namespace experimental
 } // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.h b/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.h
index 4ce4443..2654224 100644
--- a/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.h
+++ b/src/dynamic_fusion/sketch/gpu/GpuLogicalKernel.h
@@ -65,8 +65,6 @@
     GpuKernelSourceCode write_kernel_code();
 
 private:
-    void add_load_store();
-
     GpuComponentServices                             *_services;
     GpuKernelComponentGroup                           _comp_group{};
     std::vector<std::unique_ptr<IGpuKernelComponent>> _store_components{};
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp
new file mode 100644
index 0000000..017536d
--- /dev/null
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2022 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
+
+#include "src/core/helpers/AutoConfiguration.h"
+#include "src/common/utils/Log.h"
+
+#include "src/dynamic_fusion/sketch/ArgumentPack.h"
+#include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
+#include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentStore.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
+namespace
+{
+constexpr GpuOperatorType operator_type = GpuOperatorType::Simple;
+}
+
+Status GpuOutput::is_supported_op(const GpuWorkloadContext &context,
+                                  const ITensorInfo        *src,
+                                  const ITensorInfo        *dst)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+
+    // Initialize the destination tensor info.
+    TensorInfo dst_to_validate = *dst;
+    auto_init_if_empty(dst_to_validate, *src);
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, &dst_to_validate);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, &dst_to_validate);
+
+    ARM_COMPUTE_UNUSED(context);
+    return Status{};
+}
+
+Status GpuOutput::validate_op(const GpuWorkloadSketch &sketch,
+                              const ITensorInfo       *src,
+                              const ITensorInfo       *dst)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
+    ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
+    ARM_COMPUTE_RETURN_ERROR_ON(!dst->has_valid_id());
+
+    // Initialize the destination tensor info.
+    TensorInfo dst_to_validate = *dst;
+    auto_init_if_empty(dst_to_validate, *src);
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(src, &dst_to_validate);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(src, &dst_to_validate);
+
+    // Perform fusion test.
+    ArgumentPack<ITensorInfo> tensors;
+    tensors.add_const_tensor(ACL_SRC_0, src);
+    tensors.add_const_tensor(ACL_DST_0, &dst_to_validate);
+
+    const auto group = sketch.implementation().operator_group();
+    const auto op = group.new_operator(operator_type, tensors);
+    const auto success = group.try_add_operator(op);
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!success, "This operator cannot be fused into the workload.");
+    ARM_COMPUTE_UNUSED(success);
+
+    const auto status = is_supported_op(*sketch.gpu_context(), src, dst);
+    return status;
+}
+
+void GpuOutput::create_op(GpuWorkloadSketch &sketch,
+                          ITensorInfo       *src,
+                          ITensorInfo       *dst)
+{
+    ARM_COMPUTE_LOG_PARAMS(src, dst);
+    ARM_COMPUTE_ERROR_THROW_ON(GpuOutput::validate_op(sketch, src, dst));
+
+    // Auto initialize dst tensor info if empty
+    auto_init_if_empty(*dst, *src);
+
+    // Translate into components and add to component graph
+    auto &comp_graph = sketch.implementation().component_graph();
+    const auto sketch_ctx = sketch.implementation().context();
+
+    if(sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
+    {
+        ARM_COMPUTE_ERROR_ON(sketch_ctx->cl_compile_context() == nullptr);
+
+        // Add store component
+        {
+            IGpuKernelComponent::Properties properties;
+            properties.stage(UnitWorkloadStage{ UnitWorkloadStage::Stage::Run });
+
+            ArgumentPack<ITensorInfo> arguments;
+            arguments.add_const_tensor(ACL_SRC_0, src);
+            arguments.add_const_tensor(ACL_DST_0, dst);
+            comp_graph.add_new_component<ClComponentStore>(properties, arguments);
+        }
+    }
+    else
+    {
+        ARM_COMPUTE_ERROR("Unimplemented Gpu language");
+    }
+
+    // Set up fusion test by adding to the Operator Group
+    // Note this has to be performed after all the components have been successfully added to the component graph
+
+    // Pack tensor infos
+    ArgumentPack<ITensorInfo> tensors;
+    tensors.add_const_tensor(ACL_SRC_0, src);
+    tensors.add_const_tensor(ACL_DST_0, dst);
+
+    const Operator op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
+    sketch.implementation().operator_group().add_operator(op);
+}
+
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp
index 996bf15..6c1e0fb 100644
--- a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateElementwiseBinary.cpp
@@ -110,8 +110,8 @@
 R"_(
     LOOP_UNROLLING(int, i, 0, 1, M0,
     {
-        g_dst_indirect_y[i].v = (uint)min(g_ind_1 + i, (int)({{dst}}_w * {{dst}}_h) - 1);
-        g_dst_indirect_y[i].v += g_ind_2 * (int)({{dst}}_w * {{dst}}_h);
+        g_dst_indirect_y[i].v = (uint)min(g_ind_1 + i, (int)({{out}}_w * {{out}}_h) - 1);
+        g_dst_indirect_y[i].v += g_ind_2 * (int)({{out}}_w * {{out}}_h);
     })
     }
     //------------------ END KERNEL {{meta_kernel_id}} ELTWISE_OP ---------------------
@@ -194,6 +194,7 @@
         lut["lhs"] = vtable.get_variable(_lhs);
         lut["rhs"] = vtable.get_variable(_rhs);
         lut["dst"] = vtable.get_variable(_dst);
+        lut["out"] = vtable.get_variable(comp_group.get_dst_tensors().front());
     }
     else
     {
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp
index cb643a7..0afd0e7 100644
--- a/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/cl/ClTemplateWriter.cpp
@@ -179,7 +179,11 @@
         code += macros;
     }
 
-    code += write_kernel_signature(_vtable.get_variable_list(_components.get_argument_tensors()));
+    auto arguments = _components.get_argument_tensors();
+    std::sort(arguments.begin(), arguments.end(), [](const ITensorInfo *l, const ITensorInfo *r) {
+        return l->id() < r->id();
+    });
+    code += write_kernel_signature(_vtable.get_variable_list(arguments));
 
     code += "\n{\n\n";
 
@@ -190,6 +194,7 @@
     for(const auto &component_code : component_codes)
     {
         code += component_code;
+        code += "\n";
     }
 
     code += "}\n";
diff --git a/src/dynamic_fusion/sketch/utils/DependencyGraph.h b/src/dynamic_fusion/sketch/utils/DependencyGraph.h
index 03678de..633c5e4 100644
--- a/src/dynamic_fusion/sketch/utils/DependencyGraph.h
+++ b/src/dynamic_fusion/sketch/utils/DependencyGraph.h
@@ -417,6 +417,33 @@
         }
         return tensors;
     }
+    /** Get intermediate tensors of the whole graph.
+     *
+     * @return std::vector<TensorId>
+     */
+    std::vector<TensorId> intermediate_tensors() const
+    {
+        std::vector<TensorId> tensors;
+
+        // If a tensor connects the output of one operator to the input of another,
+        // it is not allocated in memory. The tensor exists only as a temporary variable.
+        for(auto src_tensor : _adj_src_ops)
+        {
+            if(!src_tensor.second.empty())
+            {
+                const auto dst_tensor = _adj_dst_ops.find(src_tensor.first);
+                if(dst_tensor != _adj_dst_ops.end())
+                {
+                    if(!dst_tensor->second.empty())
+                    {
+                        tensors.push_back(src_tensor.first);
+                    }
+                }
+            }
+        }
+
+        return tensors;
+    }
     /** Get all root ops. Root ops can also be referred to as "src ops" of the whole graph
      *
      * @return std::vector<OperatorId>