Make CLArithmeticSubtraction kernel and function stateless

Resolves COMPMID-4008
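
The refactor follows the stateless-operator pattern: the operator is
configured once with ITensorInfo descriptors only, and the concrete
tensors are bound at execution time through an ITensorPack. A minimal
sketch of how the new opencl::ClSub is expected to be driven (src0,
src1 and dst are illustrative CLTensor names, not identifiers from
this patch):

    // Assuming `using namespace arm_compute;` and pre-allocated CLTensors
    // src0, src1, dst (illustrative names only).
    opencl::ClSub sub;
    // Configure with tensor metadata only; no tensor memory is captured here.
    sub.configure(CLKernelLibrary::get().get_compile_context(),
                  src0.info(), src1.info(), dst.info(),
                  ConvertPolicy::SATURATE, ActivationLayerInfo());

    // Bind the concrete tensors at run time via an ITensorPack.
    ITensorPack pack;
    pack.add_tensor(TensorType::ACL_SRC_0, &src0);
    pack.add_tensor(TensorType::ACL_SRC_1, &src1);
    pack.add_tensor(TensorType::ACL_DST, &dst);
    sub.run(pack);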

Change-Id: Ic5f40610e771f31e6d301dfae976c81e9c79fa8b
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4917
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/core/gpu/cl/kernels/ClActivationKernel.h b/src/core/gpu/cl/kernels/ClActivationKernel.h
index 30adc55..68c309e 100644
--- a/src/core/gpu/cl/kernels/ClActivationKernel.h
+++ b/src/core/gpu/cl/kernels/ClActivationKernel.h
@@ -45,9 +45,9 @@
      * @note If the output tensor is a nullptr, the activation function will be performed in-place
      *
      * @param[in]      compile_context The compile context to be used.
-     * @param[in, out] src             Source tensor. In case of @p dst tensor = nullptr, this tensor will store the result
+     * @param[in, out] src             Source tensor info. If @p dst is nullptr, this tensor will store the result
      *                                 of the activation function. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM16/F16/F32.
-     * @param[out]     dst             Destination tensor. Data type supported: same as @p src
+     * @param[out]     dst             Destination tensor info. Data type supported: same as @p src
      * @param[in]      act_info        Activation layer information.
      */
     void configure(const ClCompileContext &compile_context, ITensorInfo *src, ITensorInfo *dst, ActivationLayerInfo act_info);
diff --git a/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h b/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h
index 378a08a..d9fa905 100644
--- a/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h
+++ b/src/core/gpu/cl/kernels/ClBatchConcatenateKernel.h
@@ -46,9 +46,9 @@
     /** Initialise the kernel's source and destination
      *
      * @param[in]     compile_context The compile context to be used.
-     * @param[in]     src             Source tensor. Data types supported: All.
+     * @param[in]     src             Source tensor info. Data types supported: All.
      * @param[in]     batch_offset    The offset on axis # 3.
-     * @param[in,out] dst             Destination tensor. Data types supported: Same as @p src.
+     * @param[in,out] dst             Destination tensor info. Data types supported: Same as @p src.
      *
      * @note: The dst tensor's low two dimensions can't be smaller than the src one's.
      * @note: The gaps between the two lowest dimensions of src and dst need to be divisible by 2.
diff --git a/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h b/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h
index 144d7d4..5acfb33 100644
--- a/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h
+++ b/src/core/gpu/cl/kernels/ClDepthConcatenateKernel.h
@@ -46,9 +46,9 @@
     /** Initialise the kernel's source and destination
      *
      * @param[in]     compile_context The compile context to be used.
-     * @param[in]     src             Source tensor. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[in]     src             Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
      * @param[in]     depth_offset    The offset on the Z axis.
-     * @param[in,out] dst             Destination tensor. Data types supported: Same as @p src.
+     * @param[in,out] dst             Destination tensor info. Data types supported: Same as @p src.
      *
      * @note: The dst tensor's low two dimensions can't be smaller than the src one's.
      * @note: The gaps between the two lowest dimensions of src and dst need to be divisible by 2.
diff --git a/src/core/gpu/cl/kernels/ClFloorKernel.h b/src/core/gpu/cl/kernels/ClFloorKernel.h
index 09ab801..646dfb3 100644
--- a/src/core/gpu/cl/kernels/ClFloorKernel.h
+++ b/src/core/gpu/cl/kernels/ClFloorKernel.h
@@ -43,8 +43,8 @@
     /** Configure kernel for a given list of arguments
      *
      * @param[in]  compile_context The compile context to be used.
-     * @param[in]  src             Source tensor. Data type supported: F16/F32.
-     * @param[out] dst             Destination tensor. Same as @p src
+     * @param[in]  src             Source tensor info. Data type supported: F16/F32.
+     * @param[out] dst             Destination tensor info. Same as @p src
      */
     void configure(const ClCompileContext &compile_context, const ITensorInfo *src, ITensorInfo *dst);
 
diff --git a/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h b/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h
index 88cd4c4..9a4380a 100644
--- a/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h
+++ b/src/core/gpu/cl/kernels/ClHeightConcatenateKernel.h
@@ -46,9 +46,9 @@
     /** Initialise the kernel's source and destination
      *
      * @param[in]  compile_context The compile context to be used.
-     * @param[in]  src             Source tensor. Data types supported: All.
+     * @param[in]  src             Source tensor info. Data types supported: All.
      * @param[in]  height_offset   The starting offset on the Y axis for the dst tensor.
-     * @param[out] dst             Destination tensor. Data types supported: same as @p src.
+     * @param[out] dst             Destination tensor info. Data types supported: same as @p src.
      *
      */
     void configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int height_offset, ITensorInfo *dst);
diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h b/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h
index 9271500..ddade29 100644
--- a/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h
+++ b/src/core/gpu/cl/kernels/ClWidthConcatenate2TensorsKernel.h
@@ -46,9 +46,9 @@
     /** Initialise the kernel's sources and destination
      *
      * @param[in]  compile_context The compile context to be used.
-     * @param[in]  src1            First source tensor. Data types supported: All.
-     * @param[in]  src2            Second source tensor. Data types supported: same as @p src1
-     * @param[out] dst             Destination tensor. Data types supported: Same as @p src1.
+     * @param[in]  src1            First source tensor info. Data types supported: All.
+     * @param[in]  src2            Second source tensor info. Data types supported: same as @p src1
+     * @param[out] dst             Destination tensor info. Data types supported: Same as @p src1.
      */
     void configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst);
     /**  Static function to check if given info will lead to a valid configuration of @ref ClWidthConcatenate2TensorsKernel
diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h b/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h
index 06d6c03..19bda65 100644
--- a/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h
+++ b/src/core/gpu/cl/kernels/ClWidthConcatenate4TensorsKernel.h
@@ -47,11 +47,11 @@
     /** Initialise the kernel's sources and destination
      *
      * @param[in]  compile_context The compile context to be used.
-     * @param[in]  src1            First source tensor. Data types supported: All.
-     * @param[in]  src2            Second source tensor. Data types supported: same as @p src1
-     * @param[in]  src3            Third source tensor. Data types supported: same as @p src1
-     * @param[in]  src4            Fourth source tensor. Data types supported: same as @p src1
-     * @param[out] dst             Destination tensor. Data types supported: same as @p src1.
+     * @param[in]  src1            First source tensor info. Data types supported: All.
+     * @param[in]  src2            Second source tensor info. Data types supported: same as @p src1
+     * @param[in]  src3            Third source tensor info. Data types supported: same as @p src1
+     * @param[in]  src4            Fourth source tensor info. Data types supported: same as @p src1
+     * @param[out] dst             Destination tensor info. Data types supported: same as @p src1.
      */
     void configure(const CLCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *src3, ITensorInfo *src4, ITensorInfo *dst);
     /**  Static function to check if given info will lead to a valid configuration of @ref ClWidthConcatenate4TensorsKernel
diff --git a/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h b/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h
index 3bffe52..6bc8e57 100644
--- a/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h
+++ b/src/core/gpu/cl/kernels/ClWidthConcatenateKernel.h
@@ -46,9 +46,9 @@
     /** Initialise the kernel's source and destination
      *
      * @param[in]     compile_context The compile context to be used.
-     * @param[in]     src             Source tensor. Data types supported: All.
+     * @param[in]     src             Source tensor info. Data types supported: All.
      * @param[in]     width_offset    The offset on the X axis.
-     * @param[in,out] dst             Destination tensor. Data types supported: same as @p src.
+     * @param[in,out] dst             Destination tensor info. Data types supported: same as @p src.
      *
      */
     void configure(const CLCompileContext &compile_context, ITensorInfo *src, unsigned int width_offset, ITensorInfo *dst);
diff --git a/src/runtime/CL/functions/CLElementwiseOperations.cpp b/src/runtime/CL/functions/CLElementwiseOperations.cpp
index 638990e..9b809ee 100644
--- a/src/runtime/CL/functions/CLElementwiseOperations.cpp
+++ b/src/runtime/CL/functions/CLElementwiseOperations.cpp
@@ -28,6 +28,7 @@
 #include "src/core/gpu/cl/kernels/ClElementwiseKernel.h"
 
 #include "src/runtime/gpu/cl/operators/ClAdd.h"
+#include "src/runtime/gpu/cl/operators/ClSub.h"
 
 #include <utility>
 
@@ -35,28 +36,6 @@
 {
 namespace experimental
 {
-CLArithmeticSubtraction::CLArithmeticSubtraction()
-{
-}
-void CLArithmeticSubtraction::configure(const CLCompileContext &compile_context, ITensorInfo *input1, ITensorInfo *input2, ITensorInfo *output, ConvertPolicy policy,
-                                        const ActivationLayerInfo &act_info)
-{
-    auto k = std::make_unique<arm_compute::opencl::kernels::ClSaturatedArithmeticKernel>();
-    k->configure(compile_context, ArithmeticOperation::SUB, input1, input2, output, policy, act_info);
-    _kernel = std::move(k);
-}
-
-Status CLArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
-{
-    ARM_COMPUTE_UNUSED(policy);
-    return arm_compute::opencl::kernels::ClSaturatedArithmeticKernel::validate(ArithmeticOperation::SUB, input1, input2, output, policy, act_info);
-}
-
-void CLArithmeticSubtraction::run(ITensorPack &tensors)
-{
-    ICLOperator::run(tensors);
-}
-
 CLArithmeticDivision::CLArithmeticDivision()
 {
 }
@@ -210,10 +189,10 @@
 
 struct CLArithmeticSubtraction::Impl
 {
-    const ICLTensor                                       *src_0{ nullptr };
-    const ICLTensor                                       *src_1{ nullptr };
-    ICLTensor                                             *dst{ nullptr };
-    std::unique_ptr<experimental::CLArithmeticSubtraction> op{ nullptr };
+    const ICLTensor               *src_0{ nullptr };
+    const ICLTensor               *src_1{ nullptr };
+    ICLTensor                     *dst{ nullptr };
+    std::unique_ptr<opencl::ClSub> op{ nullptr };
 };
 
 CLArithmeticSubtraction::CLArithmeticSubtraction()
@@ -235,13 +214,13 @@
     _impl->src_0 = input1;
     _impl->src_1 = input2;
     _impl->dst   = output;
-    _impl->op    = std::make_unique<experimental::CLArithmeticSubtraction>();
+    _impl->op    = std::make_unique<opencl::ClSub>();
     _impl->op->configure(compile_context, input1->info(), input2->info(), output->info(), policy, act_info);
 }
 
 Status CLArithmeticSubtraction::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy, const ActivationLayerInfo &act_info)
 {
-    return experimental::CLArithmeticSubtraction::validate(input1, input2, output, policy, act_info);
+    return opencl::ClSub::validate(input1, input2, output, policy, act_info);
 }
 
 void CLArithmeticSubtraction::run()
diff --git a/src/runtime/gpu/cl/operators/ClAdd.h b/src/runtime/gpu/cl/operators/ClAdd.h
index 2854c16..f751d8d 100644
--- a/src/runtime/gpu/cl/operators/ClAdd.h
+++ b/src/runtime/gpu/cl/operators/ClAdd.h
@@ -58,11 +58,11 @@
      *   - (QSYMM16,QSYMM16) -> QSYMM16
      *
      * @param[in]      compile_context The compile context to be used.
-     * @param[in, out] src1            First source tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+     * @param[in, out] src1            First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
      *                                 The source tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
-     * @param[in, out] src2            Second source tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+     * @param[in, out] src2            Second source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
      *                                 The source tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
-     * @param[out]     dst             Destination tensor. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+     * @param[out]     dst             Destination tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
      * @param[in]      policy          Policy to use to handle overflow.
      * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
      */
diff --git a/src/runtime/gpu/cl/operators/ClConcatenate.h b/src/runtime/gpu/cl/operators/ClConcatenate.h
index 112e2ac..0d960a6 100644
--- a/src/runtime/gpu/cl/operators/ClConcatenate.h
+++ b/src/runtime/gpu/cl/operators/ClConcatenate.h
@@ -54,8 +54,8 @@
      *
      *
      * @param[in]     compile_context The compile context to be used.
-     * @param[in,out] src_vector      The vectors containing all the tensors to concatenate. Data types supported: All
-     * @param[out]    dst             Destination tensor. Data types supported: same as @p src_vector.
+     * @param[in,out] src_vector      The vector containing all the tensor infos to concatenate. Data types supported: All
+     * @param[out]    dst             Destination tensor info. Data types supported: same as @p src_vector.
      * @param[in]     axis            Concatenation axis. Supported underlying concatenation axis are 0, 1, 2 and 3.
      */
     void configure(const ClCompileContext &compile_context, const std::vector<ITensorInfo *> &src_vector, ITensorInfo *dst, size_t axis);
diff --git a/src/runtime/gpu/cl/operators/ClSub.cpp b/src/runtime/gpu/cl/operators/ClSub.cpp
new file mode 100644
index 0000000..429f23a
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClSub.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/gpu/cl/operators/ClSub.h"
+
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/core/gpu/cl/kernels/ClElementwiseKernel.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+void ClSub::configure(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst,
+                      ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+    auto k = std::make_unique<kernels::ClSaturatedArithmeticKernel>();
+    k->configure(compile_context, ArithmeticOperation::SUB, src1, src2, dst, policy, act_info);
+    _kernel = std::move(k);
+}
+
+Status ClSub::validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst,
+                       ConvertPolicy policy, const ActivationLayerInfo &act_info)
+{
+    return kernels::ClSaturatedArithmeticKernel::validate(ArithmeticOperation::SUB, src1, src2, dst, policy, act_info);
+}
+} // namespace opencl
+} // namespace arm_compute
diff --git a/src/runtime/gpu/cl/operators/ClSub.h b/src/runtime/gpu/cl/operators/ClSub.h
new file mode 100644
index 0000000..bcad84d
--- /dev/null
+++ b/src/runtime/gpu/cl/operators/ClSub.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CL_SUB_H
+#define ARM_COMPUTE_CL_SUB_H
+
+#include "src/core/gpu/cl/ClCompileContext.h"
+#include "src/runtime/gpu/cl/IClOperator.h"
+
+namespace arm_compute
+{
+namespace opencl
+{
+/** Basic function to run arithmetic subtraction
+ *
+ * @note The tensor data type for the inputs must be U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+ * @note The function performs an arithmetic subtraction between two tensors.
+ */
+class ClSub : public IClOperator
+{
+public:
+    /** Default Constructor */
+    ClSub() = default;
+    /** Configure function for a given list of arguments.
+     *
+     * Valid configurations (src1,src2) -> dst :
+     *
+     *   - (U8,U8)           -> U8
+     *   - (U8,U8)           -> S16
+     *   - (S16,U8)          -> S16
+     *   - (U8,S16)          -> S16
+     *   - (S16,S16)         -> S16
+     *   - (S32,S32)         -> S32
+     *   - (F16,F16)         -> F16
+     *   - (F32,F32)         -> F32
+     *   - (QASYMM8,QASYMM8) -> QASYMM8
+     *   - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
+     *   - (QSYMM16,QSYMM16) -> QSYMM16
+     *
+     * @param[in]      compile_context The compile context to be used.
+     * @param[in, out] src1            First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+     *                                 The source tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[in, out] src2            Second source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+     *                                 The source tensor is [in, out] because its TensorInfo might be modified inside the kernel in case of broadcasting of dimension 0.
+     * @param[out]     dst             Destination tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+     * @param[in]      policy          Policy to use to handle overflow.
+     * @param[in]      act_info        (Optional) Activation layer information in case of a fused activation.
+     */
+    void configure(const ClCompileContext &compile_context, ITensorInfo *src1, ITensorInfo *src2, ITensorInfo *dst, ConvertPolicy policy,
+                   const ActivationLayerInfo &act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref ClSub
+     *
+     * Valid configurations (src1,src2) -> dst :
+     *
+     *   - (U8,U8)           -> U8
+     *   - (U8,U8)           -> S16
+     *   - (S16,U8)          -> S16
+     *   - (U8,S16)          -> S16
+     *   - (S16,S16)         -> S16
+     *   - (S32,S32)         -> S32
+     *   - (F16,F16)         -> F16
+     *   - (F32,F32)         -> F32
+     *   - (QASYMM8,QASYMM8) -> QASYMM8
+     *   - (QASYMM8_SIGNED,QASYMM8_SIGNED) -> QASYMM8_SIGNED
+     *   - (QSYMM16,QSYMM16) -> QSYMM16
+     *
+     * @param[in] src1     First source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+     * @param[in] src2     Second source tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+     * @param[in] dst      Destination tensor info. Data types supported: U8/QASYMM8/QASYMM8_SIGNED/S16/QSYMM16/S32/F16/F32.
+     * @param[in] policy   Policy to use to handle overflow.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src1, const ITensorInfo *src2, const ITensorInfo *dst, ConvertPolicy policy,
+                           const ActivationLayerInfo &act_info = ActivationLayerInfo());
+};
+} // namespace opencl
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CL_SUB_H */
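
As a usage note for the static checker documented above (a minimal sketch,
assuming callers query it the same way as ClAdd::validate, with plain
TensorInfo descriptors and no OpenCL resources allocated; shapes and data
types below are chosen for illustration only):

    // Assuming `using namespace arm_compute;`.
    const TensorInfo src1_info(TensorShape(16U, 16U), 1, DataType::F32);
    const TensorInfo src2_info(TensorShape(16U, 16U), 1, DataType::F32);
    const TensorInfo dst_info(TensorShape(16U, 16U), 1, DataType::F32);
    const Status     status = opencl::ClSub::validate(&src1_info, &src2_info, &dst_info,
                                                      ConvertPolicy::SATURATE);
    // status.error_code() == ErrorCode::OK when the configuration is supported.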