Make Softmax kernels and operator stateless

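Port NESoftmaxLayer to the new CpuSoftmaxGeneric operator and the
CpuLogits1DMaxKernel/CpuLogits1DSoftmaxKernel kernels: configuration
now takes only ITensorInfo, and the actual tensors (including the
intermediates) are bound at run time through an ITensorPack.
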
COMPMID-3997

Change-Id: I3a3cc76d8247dd769d9a5e6e171d718ea909312c
Signed-off-by: Michalis Spyrou <michalis.spyrou@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4986
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
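
For reference, a minimal sketch of how the ported stateless operator is
driven, mirroring NESoftmaxLayerGeneric::run() below. The helper function,
tensor names and the assumption that the caller has already allocated the
intermediate tensors are illustrative, not part of the patch:

    #include "arm_compute/core/ITensorPack.h"
    #include "arm_compute/runtime/Tensor.h"
    #include "src/runtime/cpu/operators/CpuSoftmax.h"

    using namespace arm_compute;

    void run_softmax(ITensor *src, ITensor *dst, Tensor &tmp, Tensor &max,
                     Tensor &in_perm, Tensor &out_perm, float beta, int32_t axis)
    {
        // Configuration captures only tensor metadata, no tensor pointers.
        cpu::CpuSoftmaxGeneric<false> op;
        op.configure(src->info(), dst->info(), beta, axis);

        // Every tensor, including the intermediates, is supplied per run.
        ITensorPack pack;
        pack.add_tensor(TensorType::ACL_SRC, src);
        pack.add_tensor(TensorType::ACL_DST, dst);
        pack.add_tensor(TensorType::ACL_INT_0, &tmp);
        pack.add_tensor(TensorType::ACL_INT_1, &max);
        pack.add_tensor(TensorType::ACL_INT_2, &in_perm);
        pack.add_tensor(TensorType::ACL_INT_3, &out_perm);
        op.run(pack);
    }

NESoftmaxLayerGeneric keeps the memory-group bookkeeping and the allocation
of the intermediate tensors; the operator itself holds no tensor state.
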
diff --git a/src/runtime/NEON/functions/NESoftmaxLayer.cpp b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
index 6be34ad..3f1e43a 100644
--- a/src/runtime/NEON/functions/NESoftmaxLayer.cpp
+++ b/src/runtime/NEON/functions/NESoftmaxLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017-2020 Arm Limited.
+ * Copyright (c) 2017-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -22,49 +22,62 @@
  * SOFTWARE.
  */
 #include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
-
-#include "arm_compute/core/Helpers.h"
-#include "arm_compute/core/utils/misc/ShapeCalculator.h"
-#include "arm_compute/runtime/NEON/NEScheduler.h"
-#include "src/core/NEON/kernels/NEFillBorderKernel.h"
-#include "src/core/NEON/kernels/NESoftmaxLayerKernel.h"
-#include "src/core/NEON/kernels/NESoftmaxLayerKernel.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/runtime/Tensor.h"
+#include "src/core/cpu/kernels/CpuSoftmaxKernel.h"
 #include "src/core/helpers/SoftmaxHelpers.h"
+#include "src/runtime/cpu/operators/CpuSoftmax.h"
 
 namespace arm_compute
 {
 template <bool IS_LOG>
-NESoftmaxLayerGeneric<IS_LOG>::~NESoftmaxLayerGeneric() = default;
+struct NESoftmaxLayerGeneric<IS_LOG>::Impl
+{
+    const ITensor                                  *src{ nullptr };
+    ITensor                                        *dst{ nullptr };
+    Tensor                                          max{ nullptr };
+    Tensor                                          tmp{ nullptr };
+    Tensor                                          input_permuted{ nullptr };
+    Tensor                                          output_permuted{ nullptr };
+    std::unique_ptr<cpu::CpuSoftmaxGeneric<IS_LOG>> op{ nullptr };
+};
 
 template <bool IS_LOG>
 NESoftmaxLayerGeneric<IS_LOG>::NESoftmaxLayerGeneric(std::shared_ptr<IMemoryManager> memory_manager)
-    : _memory_group(std::move(memory_manager)), _permute_input(), _permute_output(), _max_kernel(), _softmax_kernel(), _fill_border_kernel(), _max(), _tmp(), _input_permuted(), _output_permuted(),
-      _needs_permute(false)
+    : _memory_group(std::move(memory_manager)), _impl(std::make_unique<Impl>())
 {
 }
 
 template <bool IS_LOG>
+NESoftmaxLayerGeneric<IS_LOG>::NESoftmaxLayerGeneric(NESoftmaxLayerGeneric &&) = default;
+template <bool                 IS_LOG>
+NESoftmaxLayerGeneric<IS_LOG> &NESoftmaxLayerGeneric<IS_LOG>::operator=(NESoftmaxLayerGeneric &&) = default;
+template <bool                 IS_LOG>
+NESoftmaxLayerGeneric<IS_LOG>::~NESoftmaxLayerGeneric() = default;
+
+template <bool IS_LOG>
 void NESoftmaxLayerGeneric<IS_LOG>::configure(ITensor *input, ITensor *output, float beta, int32_t axis)
 {
-    // Perform validation step
     ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_ERROR_THROW_ON(NESoftmaxLayerGeneric::validate(input->info(), output->info(), beta, axis));
 
-    const unsigned int actual_axis = static_cast<unsigned int>(wrap_around(axis, static_cast<int32_t>(input->info()->num_dimensions())));
+    _impl->src = input;
+    _impl->dst = output;
+    _impl->op  = std::make_unique<cpu::CpuSoftmaxGeneric<IS_LOG>>();
+    _impl->op->configure(input->info(), output->info(), beta, axis);
 
-    _needs_permute = actual_axis > 0;
-
-    if(_needs_permute)
+    const unsigned int actual_axis   = static_cast<unsigned int>(wrap_around(axis, static_cast<int32_t>(input->info()->num_dimensions())));
+    const bool         needs_permute = actual_axis > 0;
+    if(needs_permute)
     {
         // Add to the memory manager _input_permuted
-        _memory_group.manage(&_input_permuted);
-
-        _permute_input.configure(input, &_input_permuted, softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis));
+        auto permute_input = std::make_unique<cpu::CpuPermute>();
+        _memory_group.manage(&_impl->input_permuted);
+        permute_input->configure(input->info(), _impl->input_permuted.info(), softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis));
     }
 
     // We want to deal with a 2D input. Either it is the permuted version of the original input (4D case)
     // or it is the original input case (2D case)
-    ITensor *tmp_input = (_needs_permute ? &_input_permuted : input);
+    ITensor *tmp_input = (needs_permute ? &_impl->input_permuted : input);
 
     // Create intermediate tensors shapes
     const TensorInfo input_info    = tmp_input->info()->clone()->reset_padding().set_is_resizable(true);
@@ -74,80 +87,49 @@
     // Init intermediate tensors
     TensorShape max_sum_shape = tmp_input->info()->tensor_shape();
     max_sum_shape.set(0, 1);
-    _max.allocator()->init(input_info.clone()->set_tensor_shape(max_sum_shape));
-    _tmp.allocator()->init(tensor_info_tmp);
+    _impl->max.allocator()->init(input_info.clone()->set_tensor_shape(max_sum_shape));
+    _impl->tmp.allocator()->init(tensor_info_tmp);
 
     // Manage intermediate buffers
-    _memory_group.manage(&_max);
-    _memory_group.manage(&_tmp);
+    _memory_group.manage(&_impl->max);
+    _memory_group.manage(&_impl->tmp);
 
     // Configure kernels
-    _max_kernel     = std::make_unique<NELogits1DMaxKernel>();
-    _softmax_kernel = std::make_unique<NELogits1DSoftmaxKernel<IS_LOG>>();
-    _max_kernel->configure(tmp_input, &_max);
-    if(_needs_permute)
+    auto max_kernel     = std::make_unique<cpu::kernels::CpuLogits1DMaxKernel>();
+    auto softmax_kernel = std::make_unique<cpu::kernels::CpuLogits1DSoftmaxKernel<IS_LOG>>();
+    max_kernel->configure(tmp_input->info(), _impl->max.info());
+
+    if(needs_permute)
     {
+        auto permute_output = std::make_unique<cpu::CpuPermute>();
         // Add to the memory manager _output_permuted
-        _memory_group.manage(&_output_permuted);
+        _memory_group.manage(&_impl->output_permuted);
 
         // The normalization kernel stores the result in a permuted output tensor
-        _softmax_kernel->configure(tmp_input, &_max, &_output_permuted, beta, &_tmp);
-        _input_permuted.allocator()->allocate();
+        softmax_kernel->configure(tmp_input->info(), _impl->max.info(), _impl->output_permuted.info(), beta, _impl->tmp.info());
+        _impl->input_permuted.allocator()->allocate();
 
         // Re-permute the permuted output into the requested (4D) output
-        _permute_output.configure(&_output_permuted, output, softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis));
+        permute_output->configure(_impl->output_permuted.info(), output->info(), softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis));
 
         // Allocate the intermediate permuted tensors
-        _output_permuted.allocator()->allocate();
+        _impl->output_permuted.allocator()->allocate();
     }
     else
     {
-        // Softmax 2D case
-        _fill_border_kernel = std::make_unique<NEFillBorderKernel>();
-        _fill_border_kernel->configure(tmp_input, _max_kernel->border_size(), BorderMode::REPLICATE);
-        _softmax_kernel->configure(tmp_input, &_max, output, beta, &_tmp);
+        softmax_kernel->configure(tmp_input->info(), _impl->max.info(), output->info(), beta, _impl->tmp.info());
     }
 
     // Allocate intermediate buffers
-    _max.allocator()->allocate();
-    _tmp.allocator()->allocate();
+    _impl->max.allocator()->allocate();
+    _impl->tmp.allocator()->allocate();
 }
 
 template <bool IS_LOG>
 Status NESoftmaxLayerGeneric<IS_LOG>::validate(const ITensorInfo *input, const ITensorInfo *output, float beta, int32_t axis)
 {
-    // Perform validation step
     ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
-    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_dimensions() > 4, "Only up to 4 dimensions are supported");
-    ARM_COMPUTE_UNUSED(beta);
-    ARM_COMPUTE_RETURN_ERROR_ON(axis < static_cast<int32_t>(-input->num_dimensions()) || static_cast<int32_t>(input->num_dimensions()) <= axis);
-
-    // Create intermediate tensor info
-    DataType         tmp_data_type = input->data_type();
-    const TensorInfo tensor_info_tmp(input->clone()->set_data_type(tmp_data_type).set_is_resizable(true));
-
-    TensorShape max_sum_shape = input->tensor_shape();
-    max_sum_shape.set(0, 1);
-    const TensorInfo tensor_info_max_sum(input->clone()->set_tensor_shape(max_sum_shape).set_data_type(tmp_data_type).set_quantization_info(input->quantization_info()).set_is_resizable(true));
-    const TensorInfo dont_care;
-
-    const unsigned int actual_axis = static_cast<unsigned int>(wrap_around(axis, static_cast<int32_t>(input->num_dimensions())));
-
-    const bool needs_permute = actual_axis > 0;
-
-    if(needs_permute)
-    {
-        const PermutationVector permutation_vector = softmax_helpers::get_permutation_vector_from_softmax_axis(actual_axis);
-        const TensorShape       permuted_shape     = misc::shape_calculator::compute_permutation_output_shape(*input, permutation_vector);
-        TensorInfo              input_permuted(input->clone()->set_tensor_shape(permuted_shape));
-        ARM_COMPUTE_RETURN_ON_ERROR(NEPermute::validate(input, &input_permuted, permutation_vector));
-        TensorInfo output_permuted(output->clone()->set_tensor_shape(permuted_shape));
-        ARM_COMPUTE_RETURN_ON_ERROR(NEPermute::validate(&output_permuted, output, permutation_vector));
-    }
-
-    ARM_COMPUTE_RETURN_ON_ERROR(NELogits1DMaxKernel::validate(input, &tensor_info_max_sum));
-    ARM_COMPUTE_RETURN_ON_ERROR(NELogits1DSoftmaxKernel<IS_LOG>::validate(&tensor_info_tmp, &tensor_info_max_sum, output, beta, &dont_care));
-
+    ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuSoftmaxGeneric<IS_LOG>::validate(input, output, beta, axis));
     return Status{};
 }
 
@@ -155,23 +137,14 @@
 void           NESoftmaxLayerGeneric<IS_LOG>::run()
 {
     MemoryGroupResourceScope scope_mg(_memory_group);
-
-    if(_needs_permute)
-    {
-        _permute_input.run();
-    }
-    else
-    {
-        NEScheduler::get().schedule(_fill_border_kernel.get(), Window::DimY);
-    }
-
-    NEScheduler::get().schedule(_max_kernel.get(), Window::DimY);
-    NEScheduler::get().schedule(_softmax_kernel.get(), Window::DimY);
-
-    if(_needs_permute)
-    {
-        _permute_output.run();
-    }
+    ITensorPack              pack;
+    pack.add_tensor(TensorType::ACL_SRC, _impl->src);
+    pack.add_tensor(TensorType::ACL_DST, _impl->dst);
+    pack.add_tensor(TensorType::ACL_INT_0, &_impl->tmp);
+    pack.add_tensor(TensorType::ACL_INT_1, &_impl->max);
+    pack.add_tensor(TensorType::ACL_INT_2, &_impl->input_permuted);
+    pack.add_tensor(TensorType::ACL_INT_3, &_impl->output_permuted);
+    _impl->op->run(pack);
 }
 
 template class NESoftmaxLayerGeneric<false>;