COMPMID-3375: Port NEActivationLayer functions/kernels to run on
different tensors.

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I98782bb73e9dc0899ffb1796aca6f99714adea94
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3343
Reviewed-by: Michalis Spyrou <michalis.spyrou@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/core/CPP/ICPPKernel.h b/arm_compute/core/CPP/ICPPKernel.h
index 21f6ab7..3ec5475 100644
--- a/arm_compute/core/CPP/ICPPKernel.h
+++ b/arm_compute/core/CPP/ICPPKernel.h
@@ -84,7 +84,7 @@
      * @param[in] window  Region on which to execute the kernel. (Must be a region of the window returned by window())
      * @param[in] info    Info about executing thread and CPU.
      */
-    virtual void run_op(const std::vector<InputOperatorTensors *> &inputs, std::vector<OutputOperatorTensors *> &outputs, const Window &window, const ThreadInfo &info)
+    virtual void run_op(const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs, const Window &window, const ThreadInfo &info)
     {
         ARM_COMPUTE_UNUSED(inputs, outputs, window, info);
     }
diff --git a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
index 82103b9..399afa6 100644
--- a/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEActivationLayerKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2019 ARM Limited.
+ * Copyright (c) 2017-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -33,6 +33,7 @@
 
 namespace arm_compute
 {
+// Forward declarations
 class ITensor;
 
 /** Interface for the activation layer kernel. */
@@ -57,12 +58,12 @@
      *
      * @note If the output tensor is a nullptr, the activation function will be performed in-place
      *
-     * @param[in, out] input           Source tensor. In case of @p output tensor = nullptr, this tensor will store the result
+     * @param[in]      input           Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
      *                                 of the activation function. Data types supported: QASYMM8/QASYMM8_SIGNED/QSYMM16/F16/F32.
-     * @param[out]     output          Destination tensor. Data type supported: same as @p input
+     * @param[out]     output          Destination tensor info. Data type supported: same as @p input
      * @param[in]      activation_info Activation layer information.
      */
-    void configure(ITensor *input, ITensor *output, ActivationLayerInfo activation_info);
+    void configure(const ITensorInfo *input, ITensorInfo *output, ActivationLayerInfo activation_info);
     /** Static function to check if given info will lead to a valid configuration of @ref NEActivationLayerKernel
      *
      * @param[in] input    Source tensor info. In case of @p output tensor info = nullptr, this tensor will store the result
@@ -75,7 +76,8 @@
     static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &act_info);
 
     // Inherited methods overridden:
-    void run(const Window &window, const ThreadInfo &info) override;
+    void run_op(const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs,
+                const Window &window, const ThreadInfo &info) override;
 
 private:
     using ActivationFunction = ActivationLayerInfo::ActivationFunction;
@@ -83,36 +85,34 @@
      *
      * @param[in] window Region on which to execute the kernel.
      */
-    using ActivationFunctionExecutorPtr = void (NEActivationLayerKernel::*)(const Window &window);
+    using ActivationFunctionExecutorPtr = void (NEActivationLayerKernel::*)(const ITensor *src, ITensor *dst, const Window &window);
     /** Function to apply an activation function on a tensor.
      *
      * @param[in] window Region on which to execute the kernel
      */
     template <ActivationLayerInfo::ActivationFunction F, typename T>
     typename std::enable_if<arm_compute::utils::traits::is_floating_point<T>::value, void>::type
-    activation(const Window &window);
+    activation(const ITensor *src, ITensor *dst, const Window &window);
     /** Function to apply an activation function on a tensor.
      *
      * @param[in] window Region on which to execute the kernel
      */
     template <ActivationLayerInfo::ActivationFunction F, typename T>
-    typename std::enable_if<std::is_same<T, qasymm8_t>::value, void>::type activation(const Window &window);
+    typename std::enable_if<std::is_same<T, qasymm8_t>::value, void>::type activation(const ITensor *src, ITensor *dst, const Window &window);
     /** Function to apply an activation function on a tensor.
      *
      * @param[in] window Region on which to execute the kernel
      */
     template <ActivationLayerInfo::ActivationFunction F, typename T>
-    typename std::enable_if<std::is_same<T, qasymm8_signed_t>::value, void>::type activation(const Window &window);
+    typename std::enable_if<std::is_same<T, qasymm8_signed_t>::value, void>::type activation(const ITensor *src, ITensor *dst, const Window &window);
     /** Function to apply an activation function on a tensor.
      *
      * @param[in] window Region on which to execute the kernel
      */
     template <ActivationLayerInfo::ActivationFunction F, typename T>
-    typename std::enable_if<std::is_same<T, qsymm16_t>::value, void>::type activation(const Window &window);
+    typename std::enable_if<std::is_same<T, qsymm16_t>::value, void>::type activation(const ITensor *src, ITensor *dst, const Window &window);
 
 private:
-    ITensor                      *_input;
-    ITensor                      *_output;
     ActivationFunctionExecutorPtr _func;
     ActivationLayerInfo           _act_info;
 };
diff --git a/arm_compute/core/NEON/kernels/NEReshapeLayerKernel.h b/arm_compute/core/NEON/kernels/NEReshapeLayerKernel.h
index 6f888e0..7a4dce1 100644
--- a/arm_compute/core/NEON/kernels/NEReshapeLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEReshapeLayerKernel.h
@@ -57,8 +57,8 @@
     static Status validate(const ITensorInfo *input, const ITensorInfo *output);
 
     // Inherited methods overridden:
-    void run_op(const std::vector<InputOperatorTensors *> &inputs, std::vector<OutputOperatorTensors *> &outputs, const Window &window, const ThreadInfo &info) override;
+    void run_op(const std::vector<InputTensor> &inputs, const std::vector<OutputTensor> &outputs,
+                const Window &window, const ThreadInfo &info) override;
 };
-
 } // namespace arm_compute
 #endif /*ARM_COMPUTE_NERESHAPELAYERKERNEL_H */
diff --git a/arm_compute/core/experimental/Types.h b/arm_compute/core/experimental/Types.h
index 6043db9..2b55918 100644
--- a/arm_compute/core/experimental/Types.h
+++ b/arm_compute/core/experimental/Types.h
@@ -36,21 +36,43 @@
 /** Memory type */
 enum class TensorType
 {
-    ACL_SRC   = 0,
-    ACL_SRC_0 = 0,
-    ACL_SRC_1 = 1,
-    ACL_SRC_2 = 2,
-    ACL_DST   = 30,
-    ACL_DST_0 = 30,
-    ACL_DST_1 = 31,
-    ACL_INT   = 50,
-    ACL_INT_0 = 50,
-    ACL_INT_1 = 51,
-    ACL_INT_2 = 52
+    ACL_UNKNOWN = -1,
+    ACL_SRC     = 0,
+    ACL_SRC_0   = 0,
+    ACL_SRC_1   = 1,
+    ACL_SRC_2   = 2,
+    ACL_DST     = 30,
+    ACL_DST_0   = 30,
+    ACL_DST_1   = 31,
+    ACL_INT     = 50,
+    ACL_INT_0   = 50,
+    ACL_INT_1   = 51,
+    ACL_INT_2   = 52
 };
-using InputOperatorTensors  = std::pair<TensorType /* tensor type */, const ITensor * /* tensor object */>;
-using OutputOperatorTensors = std::pair<TensorType /* tensor type */, ITensor * /* tensor object */>;
-using OperatorTensors       = OutputOperatorTensors;
+
+/** Input tensor aggregate */
+struct InputTensor
+{
+    InputTensor(TensorType type, const ITensor *tensor)
+        : type(type), tensor(tensor)
+    {
+    }
+
+    TensorType     type{ TensorType::ACL_UNKNOWN };
+    const ITensor *tensor{ nullptr };
+};
+/** Output tensor aggregate */
+struct OutputTensor
+{
+    OutputTensor(TensorType type, ITensor *tensor)
+        : type(type), tensor(tensor)
+    {
+    }
+
+    TensorType type{ TensorType::ACL_UNKNOWN };
+    ITensor   *tensor{ nullptr };
+};
+using OperatorTensor = OutputTensor;
 
 namespace experimental
 {