Handle intermediate tensors within the sketch

    - Intermediate tensor info objects are no longer created by the user. They are returned from create_op and reused, which prevents the allocation of intermediate tensors in case of interface misuse.
    - The sketch object manages intermediate tensor info pointers inside its implementation class via a vector of unique pointers.
    - The Conv2d operator is migrated to the new interface.
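
    A minimal usage sketch of the new interface (the tensor info objects and
    attribute values are assumed for illustration; only the create_op
    signatures come from this patch):

        CLCompileContext   cl_compile_ctx = CLKernelLibrary::get().get_compile_context();
        GpuWorkloadContext context{ &cl_compile_ctx };
        GpuWorkloadSketch  sketch{ &context };

        // dst info is no longer supplied by the caller: create_op allocates the
        // intermediate tensor info inside the sketch and returns a pointer to it
        ITensorInfo *conv_out = GpuConv2d::create_op(sketch, &src_info, &wei_info, &bia_info, conv2d_attributes);

        // The returned pointer is passed on to the next operator, e.g. the output
        GpuOutput::create_op(sketch, conv_out, &dst_info);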

Resolves: COMPMID-5776

Change-Id: I9422e3681eef4f2d2922f6d0a5d7786380837c6d
Signed-off-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8906
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: SiCong Li <sicong.li@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h b/src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h
index 3997395..08796b6 100644
--- a/src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h
+++ b/src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -29,6 +29,9 @@
 #include "src/dynamic_fusion/sketch/gpu/GpuKernelComponentGraph.h"
 #include "src/dynamic_fusion/sketch/gpu/GpuOperatorGroup.h"
 
+#include <memory>
+#include <vector>
+
 namespace arm_compute
 {
 namespace experimental
@@ -48,7 +51,8 @@
         : _context{ context },
           _comp_services{},
           _component_graph{ &_comp_services },
-          _operator_group{}
+          _operator_group{},
+          _interm_tensor_info_list{ std::vector<std::unique_ptr<TensorInfo>>() }
     {
     }
     /** Prevent instances of this class from being copy constructed */
@@ -97,13 +101,25 @@
     {
         return component_graph().fuse().write_workload_code();
     }
+    /** Create an intermediate tensor info and save it
+     *
+     * @return ITensorInfo* Pointer to the newly created intermediate tensor info object
+     */
+    ITensorInfo *create_intermediate_tensor()
+    {
+        auto uptr = std::make_unique<TensorInfo>();
+        uptr->set_id(-allocate_new_tensor_id()); // intermediate tensors must have a negative id
+        _interm_tensor_info_list.emplace_back(std::move(uptr));
+        return _interm_tensor_info_list.back().get();
+    }
 
 private:
-    Context                *_context;
-    GpuComponentServices    _comp_services;
-    GpuKernelComponentGraph _component_graph;
-    GpuOperatorGroup        _operator_group;
-    ITensorInfo::Id         _next_id{ ITensorInfo::invalid_tensor_id };
+    Context                                 *_context;
+    GpuComponentServices                     _comp_services;
+    GpuKernelComponentGraph                  _component_graph;
+    GpuOperatorGroup                         _operator_group;
+    ITensorInfo::Id                          _next_id{ ITensorInfo::invalid_tensor_id };
+    std::vector<std::unique_ptr<TensorInfo>> _interm_tensor_info_list;
 };
 } // namespace dynamic_fusion
 } // namespace experimental
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp
index 00b4fbc..00fbb73 100644
--- a/src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuConv2d.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -111,33 +111,29 @@
     }
 }
 
-constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
-} // namespace
-
-Status GpuConv2d::is_supported_op(const GpuWorkloadContext &context,
-                                  const ITensorInfo        *src,
-                                  const ITensorInfo        *wei,
-                                  const ITensorInfo        *bia,
-                                  const ITensorInfo        *dst,
-                                  const Conv2dAttributes   &attributes)
+/* A helper method to reduce the duplication in dst tensor initialization
+ * when calling validate()
+ */
+Status is_supported_op_helper(const GpuWorkloadContext &context,
+                              const ITensorInfo        *src,
+                              const ITensorInfo        *wei,
+                              const ITensorInfo        *bia,
+                              const ITensorInfo        *dst,
+                              const Conv2dAttributes   &attributes)
 {
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei, dst);
-    // Auto initialize dst tensor info
-    TensorInfo dst_info_to_validate = *dst;
-    const auto data_layout          = src->data_layout();
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei);
 
+    TensorInfo         dst_info_to_validate;
+    const ITensorInfo *dst_info_to_validate_ptr = &dst_info_to_validate;
+
+    const DataLayout data_layout = src->data_layout();
+    if(dst != nullptr)
     {
-        auto shape = misc::shape_calculator::compute_deep_convolution_shape(src->tensor_shape(), data_layout, wei->tensor_shape(),
-                                                                            PadStrideInfo(attributes.stride().x(), attributes.stride().y(), attributes.pad().left,
-                                                                                          attributes.pad().right,
-                                                                                          attributes.pad().top, attributes.pad().bottom, DimensionRoundingType::FLOOR)); // use the default DimensionRoundingType
-
-        // Checks performed when dst is configured
-        if(dst->total_size() != 0)
-        {
-            ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(dst->tensor_shape(), shape);
-        }
-        auto_init_if_empty(dst_info_to_validate, src->clone()->set_tensor_shape(shape));
+        dst_info_to_validate_ptr = dst;
+    }
+    else
+    {
+        calculate_and_init_dst_if_empty(&dst_info_to_validate, src, wei, attributes);
     }
 
     // Check support level
@@ -147,7 +143,7 @@
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_LAYOUT_NOT_IN(src, DataLayout::NHWC);
 
     // Check components
-    const auto gpu_target  = context.gpu_target();
+    const auto gpu_target = context.gpu_target();
     if(context.gpu_language() == GpuLanguage::OpenCL)
     {
         const auto cl_compile_ctx = context.cl_compile_context();
@@ -162,13 +158,13 @@
 
             settings.fast_relaxed_math(
                 (gpu_target != GPUTarget::G71 && (gpu_target & GPUTarget::GPU_ARCH_MASK) == GPUTarget::BIFROST)
-                && (dst_info_to_validate.data_type() == DataType::F32 || dst_info_to_validate.data_type() == DataType::F16));
+                && (dst_info_to_validate_ptr->data_type() == DataType::F32 || dst_info_to_validate_ptr->data_type() == DataType::F16));
 
             ArgumentPack<ITensorInfo> arguments;
             arguments.add_const_tensor(ACL_SRC_0, src);
             arguments.add_const_tensor(ACL_SRC_1, wei);
             arguments.add_const_tensor(ACL_SRC_2, bia);
-            arguments.add_const_tensor(ACL_DST_0, &dst_info_to_validate);
+            arguments.add_const_tensor(ACL_DST_0, dst_info_to_validate_ptr);
             ARM_COMPUTE_RETURN_ON_ERROR(ClComponentDirectConv2d::validate(properties, arguments, attributes, settings));
         }
     }
@@ -179,25 +175,40 @@
     return Status{};
 }
 
+constexpr GpuOperatorType operator_type = GpuOperatorType::Complex;
+} // namespace
+
+Status GpuConv2d::is_supported_op(const GpuWorkloadContext &context,
+                                  const ITensorInfo        *src,
+                                  const ITensorInfo        *wei,
+                                  const ITensorInfo        *bia,
+                                  const Conv2dAttributes   &attributes)
+{
+    return is_supported_op_helper(context, src, wei, bia, nullptr, attributes);
+}
+
 Status GpuConv2d::validate_op(const GpuWorkloadSketch &sketch,
                               const ITensorInfo       *src,
                               const ITensorInfo       *wei,
                               const ITensorInfo       *bia,
-                              const ITensorInfo       *dst,
                               const Conv2dAttributes &attributes)
 {
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei, dst);
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, wei);
 
     // Check if tensors have valid id. I.e. they are created from a sketch
-    ARM_COMPUTE_RETURN_ERROR_ON(
-        !src->has_valid_id() || !wei->has_valid_id() || !dst->has_valid_id());
+    ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id() || !wei->has_valid_id());
     if(bia != nullptr)
     {
         ARM_COMPUTE_RETURN_ERROR_ON(!bia->has_valid_id());
     }
 
+    // This tensor info will have an invalid id, but since all the existing tensors in the
+    // sketch have valid ids and the DependencyGraph implementation has no notion of validity
+    // for tensor ids, it is treated as just another tensor id and will pass validation.
+    // Additionally, create_op assigns a new dst id every time, so there is no need to validate it.
+    TensorInfo dst_info_to_validate;
+
     // Auto initialize dst tensor info
-    TensorInfo dst_info_to_validate = *dst;
     calculate_and_init_dst_if_empty(&dst_info_to_validate, src, wei, attributes);
 
     // Perform fusion test
@@ -212,25 +223,26 @@
                                     "Operator fusion test failed. This operator cannot be fused into the workload");
 
     // Check if configuration is supported
-    return is_supported_op(*sketch.gpu_context(), src, wei, bia, &dst_info_to_validate, attributes);
+    return is_supported_op_helper(*sketch.gpu_context(), src, wei, bia, &dst_info_to_validate, attributes);
 }
 
-void GpuConv2d::create_op(GpuWorkloadSketch      &sketch,
-                          ITensorInfo            *src,
-                          ITensorInfo            *wei,
-                          ITensorInfo            *bia,
-                          ITensorInfo            *dst,
-                          const Conv2dAttributes &attributes)
+ITensorInfo *GpuConv2d::create_op(GpuWorkloadSketch      &sketch,
+                                  ITensorInfo            *src,
+                                  ITensorInfo            *wei,
+                                  ITensorInfo            *bia,
+                                  const Conv2dAttributes &attributes)
 {
-    ARM_COMPUTE_LOG_PARAMS(src, wei, bia, dst, attributes);
+    ARM_COMPUTE_LOG_PARAMS(src, wei, bia, attributes);
     PadStrideInfo conv_info(attributes.stride().x(), attributes.stride().y(), attributes.pad().left,
                             attributes.pad().right,
                             attributes.pad().top, attributes.pad().bottom, DimensionRoundingType::FLOOR);
     // Initialize the direct convolution descriptor
     const DirectConvComputeKernelInfo desc = config_direct_convolution_nhwc(src, wei, conv_info);
 
+    ITensorInfo *dst = sketch.implementation().create_intermediate_tensor();
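+    // Note: the sketch retains ownership of dst; its negative id marks it as an
+    // intermediate tensor (see GpuWorkloadSketch::Implementation::create_intermediate_tensor)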
+
     // Assert validation
-    ARM_COMPUTE_ERROR_THROW_ON(GpuConv2d::validate_op(sketch, src, wei, bia, dst, attributes));
+    ARM_COMPUTE_ERROR_THROW_ON(GpuConv2d::validate_op(sketch, src, wei, bia, attributes));
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, wei, dst);
 
     // Auto initialize dst tensor
@@ -295,6 +307,8 @@
 
     const auto op = sketch.implementation().operator_group().new_operator(operator_type, tensors);
     sketch.implementation().operator_group().add_operator(op);
+
+    return dst;
 }
 
 } // namespace dynamic_fusion
diff --git a/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp b/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp
index 60c2281..cd5487c 100644
--- a/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp
+++ b/src/dynamic_fusion/sketch/gpu/operators/GpuOutput.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -24,12 +24,13 @@
 
 #include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuOutput.h"
 
-#include "src/core/helpers/AutoConfiguration.h"
 #include "src/common/utils/Log.h"
+#include "src/core/helpers/AutoConfiguration.h"
 
 #include "src/dynamic_fusion/sketch/ArgumentPack.h"
 #include "src/dynamic_fusion/sketch/gpu/GpuWorkloadSketchImpl.h"
 #include "src/dynamic_fusion/sketch/gpu/components/cl/ClComponentStore.h"
+#include "src/dynamic_fusion/utils/Utils.h"
 
 namespace arm_compute
 {
@@ -65,7 +66,7 @@
 {
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src, dst);
     ARM_COMPUTE_RETURN_ERROR_ON(!src->has_valid_id());
-    ARM_COMPUTE_RETURN_ERROR_ON(!dst->has_valid_id());
+    ARM_COMPUTE_RETURN_ERROR_ON(!is_user_tensor(dst));
 
     // Initialize the destination tensor info.
     TensorInfo dst_to_validate = *dst;
@@ -79,12 +80,11 @@
     tensors.add_const_tensor(ACL_SRC_0, src);
     tensors.add_const_tensor(ACL_DST_0, &dst_to_validate);
 
-    const auto group = sketch.implementation().operator_group();
-    const auto op = group.new_operator(operator_type, tensors);
+    const auto group   = sketch.implementation().operator_group();
+    const auto op      = group.new_operator(operator_type, tensors);
     const auto success = group.try_add_operator(op, true);
 
     ARM_COMPUTE_RETURN_ERROR_ON_MSG(!success, "This operator cannot be fused into the workload.");
-    ARM_COMPUTE_UNUSED(success);
 
     const auto status = is_supported_op(*sketch.gpu_context(), src, dst);
     return status;
@@ -101,7 +101,7 @@
     auto_init_if_empty(*dst, *src);
 
     // Translate into components and add to component graph
-    auto &comp_graph = sketch.implementation().component_graph();
+    auto      &comp_graph = sketch.implementation().component_graph();
     const auto sketch_ctx = sketch.implementation().context();
 
     if(sketch_ctx->gpu_language() == GpuLanguage::OpenCL)
diff --git a/src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp b/src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp
index 2eafe62..0972b4e 100644
--- a/src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp
+++ b/src/dynamic_fusion/sketch/gpu/template_writer/GpuKernelVariableTable.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022 Arm Limited.
+ * Copyright (c) 2022-2023 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -62,7 +62,7 @@
     {
         // Declare variable associated with the tensor
         std::stringstream ss;
-        ss << alias << "_t" << tensor->id();
+        ss << alias << "_t" << abs(tensor->id());
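+        // abs() is used because intermediate tensors have negative ids, and a '-'
+        // character is not valid in a variable name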
         const auto     uniq_name = ss.str();
         TensorVariable var{ tensor->id(), uniq_name, argument_info };
 
diff --git a/src/dynamic_fusion/utils/Utils.h b/src/dynamic_fusion/utils/Utils.h
new file mode 100644
index 0000000..063dbdc
--- /dev/null
+++ b/src/dynamic_fusion/utils/Utils.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2023 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef SRC_DYNAMIC_FUSION_UTILS_UTILS
+#define SRC_DYNAMIC_FUSION_UTILS_UTILS
+
+#include "arm_compute/core/ITensorInfo.h"
+
+namespace arm_compute
+{
+namespace experimental
+{
+namespace dynamic_fusion
+{
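+/* Tensor id sign convention: tensor infos created by the user carry positive ids,
+ * while sketch-managed intermediate tensor infos carry negative ids (see
+ * GpuWorkloadSketch::Implementation::create_intermediate_tensor).
+ */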
+inline bool is_user_tensor(const ITensorInfo *tensor_info)
+{
+    return tensor_info->id() > ITensorInfo::invalid_tensor_id;
+}
+
+inline bool is_intermediate_tensor(const ITensorInfo *tensor_info)
+{
+    return tensor_info->id() < ITensorInfo::invalid_tensor_id;
+}
+
+inline bool is_valid_tensor(const ITensorInfo *tensor_info)
+{
+    return tensor_info->has_valid_id();
+}
+
+inline bool is_invalid_tensor(const ITensorInfo *tensor_info)
+{
+    return !is_valid_tensor(tensor_info);
+}
+} // namespace dynamic_fusion
+} // namespace experimental
+} // namespace arm_compute
+
+#endif /* SRC_DYNAMIC_FUSION_UTILS_UTILS */