Make NEON Pooling kernels and functions stateless

Partially resolves COMPMID-3999

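A minimal sketch of the calling pattern the stateless operators follow: configure()
receives only ITensorInfo descriptors, while the actual tensors are supplied at run
time through an ITensorPack. Illustrative only; the pack slot ids (TensorType::ACL_SRC,
TensorType::ACL_DST_0), the pooling parameters and the tensor setup are assumptions
made for the example, not part of this patch.

    // Sketch of the stateless configure()/run() pattern (illustrative only;
    // tensor allocation/shape setup omitted, pack slot ids assumed).
    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/core/experimental/Types.h"
    #include "arm_compute/runtime/Tensor.h"
    #include "src/runtime/cpu/operators/CpuPooling.h"

    void run_pooling_example(arm_compute::Tensor &src, arm_compute::Tensor &dst)
    {
        using namespace arm_compute;

        // 2x2 max pooling in NHWC; parameters chosen purely for illustration.
        PoolingLayerInfo pool_info(PoolingType::MAX, 2, DataLayout::NHWC);

        // Configure with tensor metadata only: the operator keeps no ITensor pointers.
        cpu::CpuPooling pool{};
        pool.configure(src.info(), dst.info(), pool_info);

        // Hand the actual tensors over at run time via a tensor pack.
        ITensorPack pack;
        pack.add_tensor(TensorType::ACL_SRC, &src);
        pack.add_tensor(TensorType::ACL_DST_0, &dst);
        pool.run(pack);
    }
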
Change-Id: Ib39d40694df5c5f0a9401488e0c3af3ac26e8c55
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4984
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/runtime/cpu/operators/CpuPooling.cpp b/src/runtime/cpu/operators/CpuPooling.cpp
new file mode 100644
index 0000000..0b9b38d
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuPooling.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuPooling.h"
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/core/NEON/kernels/NEFillBorderKernel.h"
+#include "src/core/cpu/kernels/CpuPoolingKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+CpuPooling::CpuPooling(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_manager(std::move(memory_manager)), _pooling_layer_kernel(), _border_handler(), _asm_glue(), _is_global_pooling_layer(false), _data_layout(DataLayout::NCHW)
+{
+}
+
+CpuPooling::~CpuPooling() = default;
+
+void CpuPooling::configure(ITensorInfo *input, ITensorInfo *output, const PoolingLayerInfo &pool_info, ITensorInfo *indices)
+{
+    // Check if we can run assembly kernels. Currently, indices are not supported by those kernels
+    const bool run_optimised = bool(CpuPoolingAssemblyDispatch::validate(input, output, pool_info)) && (indices == nullptr);
+
+    if(run_optimised)
+    {
+        _asm_glue = std::make_unique<CpuPoolingAssemblyDispatch>(_memory_manager);
+        _asm_glue->configure(input, output, pool_info);
+        ARM_COMPUTE_ERROR_ON(!_asm_glue->is_configured());
+    }
+    else
+    {
+        // Check if this is a global pooling layer
+        _is_global_pooling_layer = (input->dimension(0) == pool_info.pool_size.width) && (input->dimension(1) == pool_info.pool_size.height);
+
+        // Get data layout
+        _data_layout = pool_info.data_layout == DataLayout::UNKNOWN ? input->data_layout() : pool_info.data_layout;
+
+        // Configure pooling kernel
+        auto k = std::make_unique<kernels::CpuPoolingKernel>();
+        k->configure(input, output, pool_info, indices);
+        _pooling_layer_kernel = std::move(k);
+
+        switch(_data_layout)
+        {
+            case DataLayout::NCHW:
+            {
+                // Configure border depending on the operation required (quantized border in case of asymmetric data type)
+                BorderMode border_mode = (!indices && pool_info.pool_type == PoolingType::MAX) ? BorderMode::REPLICATE : BorderMode::CONSTANT;
+                PixelValue zero_value((indices) ? std::numeric_limits<int>::min() : 0.f);
+                if(is_data_type_quantized_asymmetric(input->data_type()) && !pool_info.exclude_padding)
+                {
+                    zero_value = PixelValue(0, input->data_type(), input->quantization_info());
+                }
+                auto b = std::make_unique<NEFillBorderKernel>();
+                b->configure(input, _pooling_layer_kernel->border_size(), border_mode, zero_value);
+                _border_handler = std::move(b);
+                break;
+            }
+            case DataLayout::NHWC:
+                break;
+            default:
+                ARM_COMPUTE_ERROR("Data layout not supported");
+        }
+    }
+}
+
+Status CpuPooling::validate(const ITensorInfo *input, const ITensorInfo *output, const PoolingLayerInfo &pool_info, const ITensorInfo *indices)
+{
+    const bool run_optimised = bool(CpuPoolingAssemblyDispatch::validate(input, output, pool_info)) && (indices == nullptr);
+
+    if(run_optimised)
+    {
+        return Status{};
+    }
+
+    return kernels::CpuPoolingKernel::validate(input, output, pool_info, indices);
+}
+
+void CpuPooling::run(ITensorPack &tensors)
+{
+    if(_asm_glue && _asm_glue->is_configured())
+    {
+        _asm_glue->run(tensors);
+    }
+    else
+    {
+        switch(_data_layout)
+        {
+            case DataLayout::NCHW:
+                // Fill border
+                NEScheduler::get().schedule_op(_border_handler.get(), Window::DimY, _border_handler->window(), tensors);
+
+                // Run pooling layer
+                NEScheduler::get().schedule_op(_pooling_layer_kernel.get(), _is_global_pooling_layer ? Window::DimZ : Window::DimY, _pooling_layer_kernel->window(), tensors);
+                break;
+            case DataLayout::NHWC:
+                // Run pooling layer
+                NEScheduler::get().schedule_op(_pooling_layer_kernel.get(), Window::DimX, _pooling_layer_kernel->window(), tensors);
+                break;
+            default:
+                ARM_COMPUTE_ERROR("Data layout not supported");
+        }
+    }
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuPooling.h b/src/runtime/cpu/operators/CpuPooling.h
new file mode 100644
index 0000000..aa607b4
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuPooling.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_POOLING_H
+#define ARM_COMPUTE_CPU_POOLING_H
+
+#include "src/runtime/cpu/ICpuOperator.h"
+
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+// Forward Declarations
+struct PoolingLayerInfo;
+
+namespace cpu
+{
+// Forward Declarations
+class CpuPoolingAssemblyDispatch;
+/** Basic function to simulate a pooling layer with the specified pooling operation. This function calls the following NEON kernels and functions:
+ *
+ * -# @ref NEFillBorderKernel (executed if padding size is different from zero)
+ * -# @ref kernels::CpuPoolingKernel
+ * -# @ref CpuPoolingAssemblyDispatch
+ */
+class CpuPooling : public ICpuOperator
+{
+public:
+    /** Constructor */
+    CpuPooling(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CpuPooling(const CpuPooling &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CpuPooling &operator=(const CpuPooling &) = delete;
+    /** Prevent instances of this class from being moved (As this class contains non movable objects) */
+    CpuPooling(CpuPooling &&) = delete;
+    /** Prevent instances of this class from being moved (As this class contains non movable objects) */
+    CpuPooling &operator=(CpuPooling &&) = delete;
+    /** Default destructor */
+    ~CpuPooling();
+    /** Set the src and dst tensor infos.
+     *
+     * @note F16 is supported for pool sizes 2 and 3 only
+     *
+     * @param[in, out] src       Source tensor info. (Written to only when padding != 0) Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[out]     dst       Destination tensor info. Data types supported: same as @p src.
+     * @param[in]      pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
+     * @param[out]     indices   (optional) Tensor info of the indices of the maximal values. Data type supported: U32.
+     */
+    void configure(ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &pool_info, ITensorInfo *indices = nullptr);
+    /** Static function to check if given info will lead to a valid configuration of @ref CpuPooling
+     *
+     * @note F16 is supported for pool sizes 2 and 3 only
+     *
+     * @param[in] src       Source tensor info. (Written to only when padding != 0) Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[in] dst       Destination tensor info. Data types supported: same as @p src.
+     * @param[in] pool_info Contains pooling operation information described in @ref PoolingLayerInfo.
+     * @param[in] indices   (optional) Tensor info of the indices of the maximal values. Data type supported: U32.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &pool_info, const ITensorInfo *indices = nullptr);
+
+    // Inherited methods overridden:
+    void run(ITensorPack &tensors) override;
+
+private:
+    std::shared_ptr<IMemoryManager> _memory_manager;
+
+    std::unique_ptr<INEKernel>                  _pooling_layer_kernel;
+    std::unique_ptr<INEKernel>                  _border_handler;
+    std::unique_ptr<CpuPoolingAssemblyDispatch> _asm_glue;
+
+    bool       _is_global_pooling_layer;
+    DataLayout _data_layout;
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_POOLING_H */
diff --git a/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp b/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp
new file mode 100644
index 0000000..4a56233
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.cpp
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.h"
+
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+#include "src/core/CPP/Validate.h"
+#include "src/core/cpu/kernels/CpuPoolingAssemblyWrapperKernel.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+CpuPoolingAssemblyDispatch::CpuPoolingAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager)
+    : _memory_group(std::move(memory_manager)),
+      _workspace(),
+      _is_global_pooling_layer(false)
+{
+}
+
+CpuPoolingAssemblyDispatch::~CpuPoolingAssemblyDispatch() = default;
+
+void CpuPoolingAssemblyDispatch::configure(const ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &info)
+{
+    const CPUInfo     &ci          = NEScheduler::get().cpu_info();
+    const unsigned int num_threads = NEScheduler::get().num_threads();
+
+    // If this configuration is not supported, silently return: it is the caller's responsibility to check whether configure() succeeded via is_configured()
+    if(!CpuPoolingAssemblyDispatch::validate(src, dst, info))
+    {
+        return;
+    }
+
+    auto pooling_wrapper = std::make_unique<kernels::CpuPoolingAssemblyWrapperKernel>();
+    ARM_COMPUTE_ERROR_ON(pooling_wrapper == nullptr);
+    pooling_wrapper->configure(src, dst, info, ci);
+
+    // Check if this is a global pooling layer
+    _is_global_pooling_layer = (src->dimension(2) == info.pool_size.width) && (src->dimension(1) == info.pool_size.height);
+
+    // Allocate workspace based on kernel's memory requirements
+    constexpr size_t alignment      = 4096;
+    const size_t     workspace_size = pooling_wrapper->get_working_size(num_threads);
+    _workspace.allocator()->init(TensorInfo(TensorShape{ (workspace_size + alignment /* FIXME: remove alignment after COMPMID-1088 */) }, 1, DataType::S8), alignment);
+    _memory_group.manage(&_workspace);
+    _workspace.allocator()->allocate();
+
+    _kernel = std::move(pooling_wrapper);
+}
+
+Status CpuPoolingAssemblyDispatch::validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info)
+{
+    return kernels::CpuPoolingAssemblyWrapperKernel::validate(src, dst, info);
+}
+
+bool CpuPoolingAssemblyDispatch::is_configured() const
+{
+    return _kernel != nullptr;
+}
+
+void CpuPoolingAssemblyDispatch::run(ITensorPack &tensors)
+{
+    ARM_COMPUTE_ERROR_ON_MSG(tensors.empty(), "No srcs provided");
+
+    tensors.add_tensor(TensorType::ACL_DST_1, &_workspace);
+
+    if(_is_global_pooling_layer)
+    {
+        NEScheduler::get().schedule_op(_kernel.get(), Window::DimX, _kernel->window(), tensors);
+    }
+    else
+    {
+        NEScheduler::get().schedule_op(_kernel.get(), Window::DimY, _kernel->window(), tensors);
+    }
+}
+} // namespace cpu
+} // namespace arm_compute
diff --git a/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.h b/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.h
new file mode 100644
index 0000000..353bbe1
--- /dev/null
+++ b/src/runtime/cpu/operators/CpuPoolingAssemblyDispatch.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef ARM_COMPUTE_CPU_POOLING_ASSEMBLY_DISPATCH_H
+#define ARM_COMPUTE_CPU_POOLING_ASSEMBLY_DISPATCH_H
+
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "src/runtime/cpu/ICpuOperator.h"
+
+namespace arm_compute
+{
+namespace cpu
+{
+class ITensor;
+
+/** Basic function to run pooling assembly kernels */
+class CpuPoolingAssemblyDispatch : public ICpuOperator
+{
+public:
+    /** Constructor */
+    CpuPoolingAssemblyDispatch(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+    /** Prevent instances of this class from being copied */
+    CpuPoolingAssemblyDispatch(const CpuPoolingAssemblyDispatch &) = delete;
+    /** Default move constructor */
+    CpuPoolingAssemblyDispatch(CpuPoolingAssemblyDispatch &&) = default;
+    /** Prevent instances of this class from being copied */
+    CpuPoolingAssemblyDispatch &operator=(const CpuPoolingAssemblyDispatch &) = delete;
+    /** Default move assignment operator */
+    CpuPoolingAssemblyDispatch &operator=(CpuPoolingAssemblyDispatch &&) = default;
+    /** Destructor */
+    ~CpuPoolingAssemblyDispatch();
+
+    /** If supported, create an assembly routine; otherwise leave this function unconfigured so the caller can fall back to a Compute Library function.
+     *
+     * @param[in]  src  Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[out] dst  Destination tensor info to store the result of pooling. Data types supported: same as @p src.
+     * @param[in]  info Pooling meta-data
+     */
+    void configure(const ITensorInfo *src, ITensorInfo *dst, const PoolingLayerInfo &info);
+
+    /** Indicates whether or not this function can be used to process the given parameters.
+     *
+     * @param[in] src  Source tensor info. Data types supported: QASYMM8/QASYMM8_SIGNED/F16/F32.
+     * @param[in] dst  Destination tensor info to store the result of pooling. Data types supported: same as @p src.
+     * @param[in] info Pooling meta-data
+     *
+     * @return a status.
+     */
+    static Status validate(const ITensorInfo *src, const ITensorInfo *dst, const PoolingLayerInfo &info);
+    /** Was the function successfully configured?
+     *
+     * @return True if the function is configured and ready to run
+     */
+    bool is_configured() const;
+    // Run method overridden
+    void run(ITensorPack &tensors) override;
+
+private:
+    arm_compute::MemoryGroup _memory_group;
+
+    arm_compute::Tensor _workspace;
+    bool                _is_global_pooling_layer;
+};
+} // namespace cpu
+} // namespace arm_compute
+#endif /* ARM_COMPUTE_CPU_POOLING_ASSEMBLY_DISPATCH_H */