Add output operator for dynamic fusion

* The output of the fused operator must be specified explicitly
  using the GpuOutput operator (see the usage sketch after this list).
* Any temporary tensor used to connect the output of one operator
  to the input of another is marked as no-alloc and is not
  allocated in memory.
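
Illustrative usage only, not part of this patch: a minimal sketch of how a
caller terminates a fused workload with GpuOutput. Header includes are
omitted, and the exact signatures of GpuWorkloadSketch::create_tensor_info
and GpuConv2d::create_op are assumed from the surrounding dynamic fusion
API; shapes and data types are placeholders.

    using namespace arm_compute;
    using namespace arm_compute::experimental::dynamic_fusion;

    CLCompileContext   cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
    GpuWorkloadContext context{ &cl_compile_ctx };
    GpuWorkloadSketch  sketch{ &context };

    // User tensors: the global inputs and output of the fused workload.
    TensorInfo src_info = sketch.create_tensor_info(TensorShape(16U, 32U, 32U), 1, DataType::F32);
    TensorInfo wei_info = sketch.create_tensor_info(TensorShape(16U, 3U, 3U, 8U), 1, DataType::F32);
    TensorInfo bia_info = sketch.create_tensor_info(TensorShape(8U), 1, DataType::F32);
    TensorInfo dst_info = sketch.create_tensor_info();

    // Temporary tensor connecting conv2d to the output operator: it is
    // classified as MemoryType::NoAlloc and never materialised in memory.
    TensorInfo conv_out_info = sketch.create_tensor_info();

    GpuConv2d::create_op(sketch, &src_info, &wei_info, &bia_info, &conv_out_info, Conv2dAttributes{});

    // The output of the fused workload must be stated explicitly.
    GpuOutput::create_op(sketch, &conv_out_info, &dst_info);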

Resolves: COMPMID-5771
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I5ae8e800f8f737db23a055a92b01c4f1d78c3bb8
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8794
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp b/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp
index 6e6422c..1f90aab 100644
--- a/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp
+++ b/src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.cpp
@@ -42,15 +42,25 @@
  */
 MemoryDescriptorMap assign_memory_descriptors(const std::map<ITensorInfo::Id, const ITensorInfo *> tensors, const DependencyGraph &graph)
 {
+    const auto all_tensors = graph.all_tensors();
+    const auto src_tensors = graph.global_src_tensors();
+    const auto dst_tensors = graph.global_dst_tensors();
+    const auto interm_tensors = graph.intermediate_tensors();
+
     MemoryDescriptorMap mem_map{};
-    for(auto t_id : graph.all_tensors())
+    for(auto t_id : all_tensors)
     {
         const auto &tensor = tensors.at(t_id);
         // Only global src and dst tensors to the entire component graph are "User" tensors, which are user-specified memories
-        if(is_in(t_id, graph.global_src_tensors()) || is_in(t_id, graph.global_dst_tensors()))
+        if(is_in(t_id, src_tensors) || is_in(t_id, dst_tensors))
         {
             mem_map[t_id] = MemoryDescriptor{ MemoryType::User };
         }
+        // Intermediate tensors only connect operators within the fused kernel and are never allocated in memory
+        else if(is_in(t_id, interm_tensors))
+        {
+            mem_map[t_id] = MemoryDescriptor{ MemoryType::NoAlloc };
+        }
         else
         {
             AuxMemoryInfo aux_mem_info{ tensor->total_size() };