Make Concatenate kernels and operator stateless

- Rename all concatenate kernels to use the Cpu prefix and move them
  accordingly: kernels under src/core/cpu/kernels and the CpuConcatenate
  operator under src/runtime/cpu/operators
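
For reference, a minimal usage sketch of the now-stateless operator (not
part of this patch; the tensors src0, src1 and dst are illustrative): the
operator is configured with tensor infos only, and the actual tensors are
passed at run time through an ITensorPack.

    cpu::CpuConcatenate concat;
    concat.configure({ src0.info(), src1.info() }, dst.info(), Window::DimX);

    ITensorPack pack;
    pack.add_tensor(TensorType(ACL_SRC_VEC + 0), &src0);
    pack.add_tensor(TensorType(ACL_SRC_VEC + 1), &src1);
    pack.add_tensor(TensorType::ACL_DST, &dst);
    concat.run(pack);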

Change-Id: If647173e84969936ebd211d4d5ae6d1e73150bdc
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4799
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
diff --git a/src/runtime/NEON/functions/NEConcatenateLayer.cpp b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
index 782f8f1..dcc5cd3 100644
--- a/src/runtime/NEON/functions/NEConcatenateLayer.cpp
+++ b/src/runtime/NEON/functions/NEConcatenateLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020 Arm Limited.
+ * Copyright (c) 2018-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,10 +23,7 @@
  */
 #include "arm_compute/runtime/NEON/functions/NEConcatenateLayer.h"
 
-#include "src/core/NEON/kernels/NEBatchConcatenateLayerKernel.h"
-#include "src/core/NEON/kernels/NEDepthConcatenateLayerKernel.h"
-#include "src/core/NEON/kernels/NEHeightConcatenateLayerKernel.h"
-#include "src/core/NEON/kernels/NEWidthConcatenateLayerKernel.h"
+#include "src/runtime/cpu/operators/CpuConcatenate.h"
 
 #include "arm_compute/core/utils/misc/ShapeCalculator.h"
 #include "arm_compute/runtime/NEON/NEScheduler.h"
@@ -39,156 +36,22 @@
 
 namespace arm_compute
 {
-namespace experimental
-{
-NEConcatenation::NEConcatenation()
-    : _concat_kernels(), _num_inputs(0), _axis(0)
-{
-}
-
-void NEConcatenation::configure(const std::vector<const ITensorInfo *> &inputs_vector, ITensorInfo *output, size_t axis)
-{
-    ARM_COMPUTE_ERROR_ON(output == nullptr);
-
-    _axis       = axis;
-    _num_inputs = inputs_vector.size();
-
-    TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis);
-
-    // Output auto inizialitation if not yet initialized
-    auto_init_if_empty(*output, output_shape, 1, inputs_vector[0]->data_type());
-    ARM_COMPUTE_ERROR_THROW_ON(NEConcatenateLayer::validate(inputs_vector, output, axis));
-
-    unsigned int offset = 0;
-
-    for(unsigned int i = 0; i < _num_inputs; ++i)
-    {
-        switch(axis)
-        {
-            case Window::DimX:
-            {
-                auto kernel = std::make_unique<NEWidthConcatenateLayerKernel>();
-                kernel->configure(inputs_vector.at(i), offset, output);
-                _concat_kernels.emplace_back(std::move(kernel));
-                break;
-            }
-            case Window::DimY:
-            {
-                auto kernel = std::make_unique<NEHeightConcatenateLayerKernel>();
-                kernel->configure(inputs_vector.at(i), offset, output);
-                _concat_kernels.emplace_back(std::move(kernel));
-                break;
-            }
-            case Window::DimZ:
-            {
-                auto kernel = std::make_unique<NEDepthConcatenateLayerKernel>();
-                kernel->configure(inputs_vector.at(i), offset, output);
-                _concat_kernels.emplace_back(std::move(kernel));
-                break;
-            }
-            case 3:
-            {
-                auto kernel = std::make_unique<NEBatchConcatenateLayerKernel>();
-                kernel->configure(inputs_vector.at(i), offset, output);
-                _concat_kernels.emplace_back(std::move(kernel));
-                break;
-            }
-            default:
-                ARM_COMPUTE_ERROR("Axis not supported");
-        }
-        offset += inputs_vector.at(i)->dimension(axis);
-    }
-}
-
-Status NEConcatenation::validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
-{
-    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(output);
-    ARM_COMPUTE_RETURN_ERROR_ON(inputs_vector.size() < 2);
-
-    unsigned int offset = 0;
-    for(const auto &input : inputs_vector)
-    {
-        ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input);
-        switch(axis)
-        {
-            case Window::DimX:
-            {
-                ARM_COMPUTE_RETURN_ON_ERROR(NEWidthConcatenateLayerKernel::validate(input, offset, output));
-                break;
-            }
-            case Window::DimY:
-            {
-                ARM_COMPUTE_RETURN_ON_ERROR(NEHeightConcatenateLayerKernel::validate(input, offset, output));
-                break;
-            }
-            case Window::DimZ:
-            {
-                ARM_COMPUTE_RETURN_ON_ERROR(NEDepthConcatenateLayerKernel::validate(input, offset, output));
-                break;
-            }
-            case 3:
-            {
-                ARM_COMPUTE_RETURN_ON_ERROR(NEBatchConcatenateLayerKernel::validate(input, offset, output));
-                break;
-            }
-            default:
-                ARM_COMPUTE_ERROR("Axis not supported");
-        }
-        offset += input->dimension(axis);
-    }
-
-    if(output->total_size() != 0)
-    {
-        TensorShape output_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(inputs_vector, axis);
-        ARM_COMPUTE_RETURN_ERROR_ON(output_shape.total_size() != output->tensor_shape().total_size());
-    }
-
-    return Status{};
-}
-
-void NEConcatenation::run(ITensorPack &tensors)
-{
-    if(tensors.empty())
-    {
-        ARM_COMPUTE_ERROR("No inputs provided");
-    }
-
-    if(static_cast<int>(tensors.size() - 1) != static_cast<int>(_num_inputs))
-    {
-        ARM_COMPUTE_ERROR("Configured with different number of inputs");
-    }
-
-    int i = 0;
-    for(auto &k : _concat_kernels)
-    {
-        ITensorPack pack;
-        pack.add_tensor(TensorType::ACL_SRC, tensors.get_const_tensor(ACL_SRC_VEC + i));
-        pack.add_tensor(TensorType::ACL_DST, tensors.get_tensor(ACL_DST));
-        NEScheduler::get().schedule_op(k.get(), Window::DimY, pack);
-        ++i;
-    }
-}
-} // namespace experimental
-
 struct NEConcatenateLayer::Impl
 {
-    std::vector<const ITensor *>                   srcs{};
-    ITensor                                       *dst{ nullptr };
-    unsigned int                                   num_inputs{ 0 };
-    unsigned int                                   axis{ 0 };
-    std::unique_ptr<experimental::NEConcatenation> op{ nullptr };
+    std::vector<const ITensor *>         srcs{};
+    ITensor                             *dst{ nullptr };
+    unsigned int                         num_inputs{ 0 };
+    unsigned int                         axis{ 0 };
+    std::unique_ptr<cpu::CpuConcatenate> op{ nullptr };
 };
 
 NEConcatenateLayer::NEConcatenateLayer()
     : _impl(std::make_unique<Impl>())
 {
 }
-
 NEConcatenateLayer::NEConcatenateLayer(NEConcatenateLayer &&) = default;
-
 NEConcatenateLayer &NEConcatenateLayer::operator=(NEConcatenateLayer &&) = default;
-
-NEConcatenateLayer::~NEConcatenateLayer() = default;
+NEConcatenateLayer::~NEConcatenateLayer()                                = default;
 
 void NEConcatenateLayer::configure(std::vector<const ITensor *> inputs_vector, ITensor *output, size_t axis)
 {
@@ -198,7 +61,7 @@
     _impl->dst        = output;
     _impl->axis       = axis;
     _impl->num_inputs = inputs_vector.size();
-    _impl->op         = std::make_unique<experimental::NEConcatenation>();
+    _impl->op         = std::make_unique<cpu::CpuConcatenate>();
 
     std::vector<const ITensorInfo *> inputs_vector_info;
     for(unsigned int i = 0; i < inputs_vector.size(); ++i)
@@ -211,7 +74,7 @@
 
 Status NEConcatenateLayer::validate(const std::vector<const ITensorInfo *> &inputs_vector, const ITensorInfo *output, size_t axis)
 {
-    return experimental::NEConcatenation::validate(inputs_vector, output, axis);
+    return cpu::CpuConcatenate::validate(inputs_vector, output, axis);
 }
 
 void NEConcatenateLayer::run()
diff --git a/src/runtime/cpu/operators/CpuConcatenate.cpp b/src/runtime/cpu/operators/CpuConcatenate.cpp
new file mode 100644
index 0000000..2094e65
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuConcatenate.cpp
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2018-2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuConcatenate.h"
+
+#include "src/core/cpu/kernels/CpuConcatenateBatchKernel.h"
+#include "src/core/cpu/kernels/CpuConcatenateDepthKernel.h"
+#include "src/core/cpu/kernels/CpuConcatenateHeightKernel.h"
+#include "src/core/cpu/kernels/CpuConcatenateWidthKernel.h"
+
+#include "arm_compute/core/utils/misc/ShapeCalculator.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Types.h"
+#include "arm_compute/core/Validate.h"
+#include "src/core/helpers/AutoConfiguration.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+CpuConcatenate::CpuConcatenate()
+    : _concat_kernels(), _num_srcs(0), _axis(0)
+{
+}
+
+void CpuConcatenate::configure(const std::vector<const ITensorInfo *> &srcs_vector, ITensorInfo *dst, size_t axis)
+{
+    ARM_COMPUTE_ERROR_ON(dst == nullptr);
+
+    _axis     = axis;
+    _num_srcs = srcs_vector.size();
+
+    TensorShape dst_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(srcs_vector, axis);
+
+    // Output auto initialization if not yet initialized
+    auto_init_if_empty(*dst, dst_shape, 1, srcs_vector[0]->data_type());
+    ARM_COMPUTE_ERROR_THROW_ON(CpuConcatenate::validate(srcs_vector, dst, axis));
+
+    unsigned int offset = 0;
+
+    for(unsigned int i = 0; i < _num_srcs; ++i)
+    {
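+        // Pick the kernel matching the concatenation axis: 0 -> width, 1 -> height, 2 -> depth, 3 -> batch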
+        switch(axis)
+        {
+            case Window::DimX:
+            {
+                auto kernel = std::make_unique<kernels::CpuConcatenateWidthKernel>();
+                kernel->configure(srcs_vector.at(i), offset, dst);
+                _concat_kernels.emplace_back(std::move(kernel));
+                break;
+            }
+            case Window::DimY:
+            {
+                auto kernel = std::make_unique<kernels::CpuConcatenateHeightKernel>();
+                kernel->configure(srcs_vector.at(i), offset, dst);
+                _concat_kernels.emplace_back(std::move(kernel));
+                break;
+            }
+            case Window::DimZ:
+            {
+                auto kernel = std::make_unique<kernels::CpuConcatenateDepthKernel>();
+                kernel->configure(srcs_vector.at(i), offset, dst);
+                _concat_kernels.emplace_back(std::move(kernel));
+                break;
+            }
+            case 3:
+            {
+                auto kernel = std::make_unique<kernels::CpuConcatenateBatchKernel>();
+                kernel->configure(srcs_vector.at(i), offset, dst);
+                _concat_kernels.emplace_back(std::move(kernel));
+                break;
+            }
+            default:
+                ARM_COMPUTE_ERROR("Axis not supported");
+        }
+        offset += srcs_vector.at(i)->dimension(axis);
+    }
+}
+
+Status CpuConcatenate::validate(const std::vector<const ITensorInfo *> &srcs_vector, const ITensorInfo *dst, size_t axis)
+{
+    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(dst);
+    ARM_COMPUTE_RETURN_ERROR_ON(srcs_vector.size() < 2);
+
+    unsigned int offset = 0;
+    for(const auto &src : srcs_vector)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(src);
+        switch(axis)
+        {
+            case Window::DimX:
+            {
+                ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuConcatenateWidthKernel::validate(src, offset, dst));
+                break;
+            }
+            case Window::DimY:
+            {
+                ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuConcatenateHeightKernel::validate(src, offset, dst));
+                break;
+            }
+            case Window::DimZ:
+            {
+                ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuConcatenateDepthKernel::validate(src, offset, dst));
+                break;
+            }
+            case 3:
+            {
+                ARM_COMPUTE_RETURN_ON_ERROR(kernels::CpuConcatenateBatchKernel::validate(src, offset, dst));
+                break;
+            }
+            default:
+                ARM_COMPUTE_ERROR("Axis not supported");
+        }
+        offset += src->dimension(axis);
+    }
+
+    if(dst->total_size() != 0)
+    {
+        TensorShape dst_shape = arm_compute::misc::shape_calculator::calculate_concatenate_shape(srcs_vector, axis);
+        ARM_COMPUTE_RETURN_ERROR_ON(dst_shape.total_size() != dst->tensor_shape().total_size());
+    }
+
+    return Status{};
+}
+
+void CpuConcatenate::run(ITensorPack &tensors)
+{
+    if(tensors.empty())
+    {
+        ARM_COMPUTE_ERROR("No inputs provided");
+    }
+
+    if(static_cast<int>(tensors.size() - 1) != static_cast<int>(_num_srcs))
+    {
+        ARM_COMPUTE_ERROR("Configured with different number of inputs");
+    }
+
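+    // Sources are expected in the pack at ids ACL_SRC_VEC + 0 .. ACL_SRC_VEC + (num_srcs - 1), the destination at ACL_DST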
+    int i = 0;
+    for(auto &k : _concat_kernels)
+    {
+        ITensorPack pack;
+        pack.add_tensor(TensorType::ACL_SRC, tensors.get_const_tensor(ACL_SRC_VEC + i));
+        pack.add_tensor(TensorType::ACL_DST, tensors.get_tensor(ACL_DST));
+        NEScheduler::get().schedule_op(k.get(), Window::DimY, pack);
+        ++i;
+    }
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuConcatenate.h b/src/runtime/cpu/operators/CpuConcatenate.h
new file mode 100644
index 0000000..3765342
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuConcatenate.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_CONCATENATE_H
+#define ARM_COMPUTE_CPU_CONCATENATE_H
+
+#include "src/core/cpu/ICpuKernel.h"
+#include "src/runtime/cpu/ICpuOperator.h"
+
+#include <vector>
+
+namespace arm_compute
+{
+namespace cpu
+{
+/** Basic function to concatenate tensors along a given axis. This function calls the following kernels:
+ *
+ * -# @ref CpuConcatenateWidthKernel (if underlying concatenation axis is 0).
+ * -# @ref CpuConcatenateHeightKernel (if underlying concatenation axis is 1).
+ * -# @ref CpuConcatenateDepthKernel (if underlying concatenation axis is 2).
+ * -# @ref CpuConcatenateBatchKernel (if underlying concatenation axis is 3).
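+ *
+ * A minimal usage sketch (illustrative only; the tensors src0, src1 and dst are assumptions, not part of this API):
+ * @code
+ * cpu::CpuConcatenate concat;
+ * concat.configure({ src0.info(), src1.info() }, dst.info(), Window::DimX);
+ *
+ * ITensorPack pack;
+ * pack.add_tensor(TensorType(ACL_SRC_VEC + 0), &src0);
+ * pack.add_tensor(TensorType(ACL_SRC_VEC + 1), &src1);
+ * pack.add_tensor(TensorType::ACL_DST, &dst);
+ * concat.run(pack);
+ * @endcode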
+ */
+class CpuConcatenate : public ICpuOperator
+{
+public:
+    /** Constructor */
+    CpuConcatenate();
+    /** Configure operator for a given list of arguments
+     *
+     * @note Preconditions on the input and output tensor dimensions differ depending on the concatenation axis.
+     * @note Preconditions can be found respectively at @ref CpuConcatenateWidthKernel, @ref CpuConcatenateHeightKernel, @ref CpuConcatenateDepthKernel and @ref CpuConcatenateBatchKernel.
+     *
+     * @param[in]     srcs_vector The vector containing all the tensor infos to concatenate. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[out]    dst         Output tensor. Data types supported: Same as @p srcs_vector.
+     * @param[in]     axis        Concatenation axis. Supported underlying concatenation axes are 0, 1, 2 and 3.
+     */
+    void configure(const std::vector<const ITensorInfo *> &srcs_vector, ITensorInfo *dst, size_t axis);
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuConcatenate
+     *
+     * @note Preconditions on the input and output tensor dimensions differ depending on the concatenation axis.
+     * @note Preconditions can be found respectively at @ref CpuConcatenateWidthKernel, @ref CpuConcatenateHeightKernel, @ref CpuConcatenateDepthKernel and @ref CpuConcatenateBatchKernel.
+     *
+     * @param[in] srcs_vector The vector containing all the tensor infos to concatenate. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[in] dst         Output tensor info. Data types supported: Same as @p srcs_vector.
+     * @param[in] axis        Concatenation axis. Supported underlying concatenation axes are 0, 1, 2 and 3.
+     *
+     * @return a status
+     */
+    static Status validate(const std::vector<const ITensorInfo *> &srcs_vector, const ITensorInfo *dst, size_t axis);
+
+    // Inherited methods overridden:
+    void run(ITensorPack &tensors) override;
+
+private:
+    std::vector<std::unique_ptr<ICpuKernel>> _concat_kernels;
+    unsigned int                             _num_srcs;
+    unsigned int                             _axis;
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_CONCATENATE_H */