Enable kernel selection testing (Phase #2)

Resolves COMPMID-4987
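
The CpuLogits1DMaxKernel and CpuLogits1DSoftmaxKernel micro-kernel
tables are now published through a static get_available_kernels()
accessor, so the validation suite can enumerate the candidates and
check which one the selector picks for a given data type / ISA pair.
A minimal sketch of such a check follows; the free function below and
the exact qualification of DataTypeISASelectorData are illustrative
assumptions, not part of this patch:

    // Illustrative only: check that the kernel chosen for FP32 on the
    // current CPU is one of the published candidates.
    #include "arm_compute/core/Error.h"
    #include "src/cpu/kernels/CpuSoftmaxKernel.h"

    void example_selection_check()
    {
        using arm_compute::cpu::kernels::CpuLogits1DMaxKernel;
        using arm_compute::cpu::kernels::DataTypeISASelectorData;

        const auto isa = arm_compute::CPUInfo::get().get_isa();

        // get_implementation() returns the first table entry whose selector
        // accepts the (data type, ISA) pair, mirroring what configure() does.
        const auto *selected = CpuLogits1DMaxKernel::get_implementation(
            DataTypeISASelectorData{ arm_compute::DataType::F32, isa });
        ARM_COMPUTE_ERROR_ON_NULLPTR(selected);

        // get_available_kernels() exposes the same table for exhaustive tests.
        for(const auto &uk : CpuLogits1DMaxKernel::get_available_kernels())
        {
            if(&uk == selected)
            {
                // The chosen implementation is a published candidate.
            }
        }
    }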
Change-Id: I1201ca3eae107989d13b6a2c6d9560de24fe112d
Signed-off-by: Yair Schwarzbaum <yair.schwarzbaum@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/7015
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/cpu/kernels/CpuSoftmaxKernel.cpp b/src/cpu/kernels/CpuSoftmaxKernel.cpp
index 054adfa..6766b10 100644
--- a/src/cpu/kernels/CpuSoftmaxKernel.cpp
+++ b/src/cpu/kernels/CpuSoftmaxKernel.cpp
@@ -22,7 +22,6 @@
  * SOFTWARE.
  */
 #include "src/cpu/kernels/CpuSoftmaxKernel.h"
-
 #include "arm_compute/core/Error.h"
 #include "arm_compute/core/Helpers.h"
 #include "arm_compute/core/ITensor.h"
@@ -30,12 +29,10 @@
 #include "arm_compute/core/Validate.h"
 #include "arm_compute/core/Window.h"
 #include "src/core/CPP/Validate.h"
+#include "src/core/common/Registrars.h"
 #include "src/core/helpers/AutoConfiguration.h"
 #include "src/core/helpers/WindowHelpers.h"
-
-#include "src/core/common/Registrars.h"
 #include "src/cpu/kernels/softmax/list.h"
-
 namespace arm_compute
 {
 namespace cpu
@@ -44,164 +41,60 @@
 {
 namespace
 {
-struct SoftmaxSelectorData
-{
-    DataType       dt;
-    const CPUInfo &ci;
-};
-using SoftmaxSelectorPtr          = std::add_pointer<bool(const SoftmaxSelectorData &data)>::type;
-using SoftmaxLogits1DMaxKernelPtr = std::add_pointer<void(const ITensor *, ITensor *, const Window &)>::type;
-using SoftmaxLogits1DKernelPtr    = std::add_pointer<void(const ITensor *, const ITensor *, void *const, ITensor *, float, bool, const Window &)>::type;
-
-struct SoftmaxLogits1DKernel
-{
-    const char              *name;
-    const SoftmaxSelectorPtr is_selected;
-    SoftmaxLogits1DKernelPtr ukernel;
-};
-
-struct SoftmaxLogits1DMaxKernel
-{
-    const char                 *name;
-    const SoftmaxSelectorPtr    is_selected;
-    SoftmaxLogits1DMaxKernelPtr ukernel;
-};
-
-static const SoftmaxLogits1DKernel available_logits_1d_kernels[] =
-{
-#if defined(ARM_COMPUTE_ENABLE_SVE)
-    {
-        "sve_fp32_softmax_logits_1d",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::F32) && data.ci.has_sve(); },
-        REGISTER_FP32_SVE(arm_compute::cpu::sve_fp32_softmax)
-    },
-    {
-        "sve_fp16_softmax_logits_1d",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::F16) && data.ci.has_sve(); },
-        REGISTER_FP16_SVE(arm_compute::cpu::sve_fp16_softmax)
-    },
-#endif /* defined(ARM_COMPUTE_ENABLE_SVE) */
-
-#if defined(ARM_COMPUTE_ENABLE_NEON)
-    {
-        "neon_fp32_softmax_logits_1d",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::F32); },
-        REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_softmax)
-    },
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-    {
-        "neon_fp16_softmax_logits_1d",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::F16); },
-        REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_softmax)
-    },
-#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) */
-#endif /* defined(ARM_COMPUTE_ENABLE_NEON) */
-
-#if defined(ARM_COMPUTE_ENABLE_SVE2)
-    {
-        "sve2_qu8_softmax_logits_1d",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::QASYMM8) && data.ci.has_sve2(); },
-        REGISTER_QASYMM8_SVE2(arm_compute::cpu::sve2_qasymm8_softmax)
-    },
-    {
-        "sve2_qs8_softmax_logits_1d",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED) && data.ci.has_sve2(); },
-        REGISTER_QASYMM8_SIGNED_SVE2(arm_compute::cpu::sve2_qasymm8_signed_softmax)
-    },
-#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
-#if defined(ARM_COMPUTE_ENABLE_NEON)
-    {
-        "neon_qu8_softmax_logits_1d",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::QASYMM8); },
-        REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_softmax)
-    },
-    {
-        "neon_qs8_softmax_logits_1d",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED); },
-        REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::neon_qasymm8_signed_softmax)
-    },
-#endif //defined(ARM_COMPUTE_ENABLE_NEON)
-};
-
-static const SoftmaxLogits1DMaxKernel available_logits_1d_max_kernels[] =
+/* Softmax Logits 1D Max - computes the maximum value along each 1D row of the logits */
+static const std::vector<CpuLogits1DMaxKernel::SoftmaxLogits1DMaxKernel> available_kernels_max_logits =
 {
 #if defined(ARM_COMPUTE_ENABLE_SVE)
     {
         "sve_fp32_logits_1d_max",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::F32) && data.ci.has_sve(); },
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F32) && data.isa.sve; },
         REGISTER_FP32_SVE(arm_compute::cpu::sve_fp32_logits)
     },
     {
         "sve_fp16_logits_1d_max",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::F16) && data.ci.has_sve(); },
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16) && data.isa.sve; },
         REGISTER_FP16_SVE(arm_compute::cpu::sve_fp16_logits)
     },
     {
         "sve_qu8_logits_1d_max",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::QASYMM8) && data.ci.has_sve(); },
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8) && data.isa.sve; },
         REGISTER_QASYMM8_SVE(arm_compute::cpu::sve_qasymm8_logits)
     },
     {
         "sve_qs8_logits_1d_max",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED) && data.ci.has_sve(); },
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED) && data.isa.sve; },
         REGISTER_QASYMM8_SIGNED_SVE(arm_compute::cpu::sve_qasymm8_signed_logits)
     },
 #endif /* defined(ARM_COMPUTE_ENABLE_SVE) */
 #if defined(ARM_COMPUTE_ENABLE_NEON)
     {
         "neon_fp32_logits_1d_max",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::F32); },
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F32); },
         REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_logits)
     },
 #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
     {
         "neon_fp16_logits_1d_max",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::F16); },
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16); },
         REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_logits)
     },
 #endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) */
     {
         "neon_qu8_logits_1d_max",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::QASYMM8); },
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8); },
         REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_logits)
     },
     {
         "neon_qs8_logits_1d_max",
-        [](const SoftmaxSelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED); },
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED); },
         REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::neon_qasymm8_singed_logits)
     },
 #endif /* defined(ARM_COMPUTE_ENABLE_NEON) */
 };
-
-const SoftmaxLogits1DKernel *get_implementation_logits(const SoftmaxSelectorData &data)
-{
-    for(const auto &uk : available_logits_1d_kernels)
-    {
-        if(uk.is_selected({ data.dt, CPUInfo::get() }))
-        {
-            return &uk;
-        }
-    }
-    return nullptr;
-}
-
-const SoftmaxLogits1DMaxKernel *get_implementation_logits_max(const SoftmaxSelectorData &data)
-{
-    for(const auto &uk : available_logits_1d_max_kernels)
-    {
-        if(uk.is_selected({ data.dt, CPUInfo::get() }))
-        {
-            return &uk;
-        }
-    }
-    return nullptr;
-}
-
 Status validate_arguments_logits_1d_max(const ITensorInfo &input, const ITensorInfo &output)
 {
     ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input);
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
-
     // Validate in case of configured output
     if(output.total_size() != 0)
     {
@@ -209,58 +102,104 @@
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(&input, &output);
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(output.tensor_shape(), TensorShape(input.tensor_shape()).set(0, 1));
     }
-
     return Status{};
 }
-
-} // namespace
-
+} // namespace
+const std::vector<CpuLogits1DMaxKernel::SoftmaxLogits1DMaxKernel> &CpuLogits1DMaxKernel::get_available_kernels()
+{
+    return available_kernels_max_logits;
+}
 void CpuLogits1DMaxKernel::configure(const ITensorInfo *src, ITensorInfo *dst)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_logits_1d_max(*src, *dst));
-
     // Softmax across the x dimension
     const TensorShape output_shape = TensorShape(src->tensor_shape()).set(0, 1);
     // Output auto initialization if not yet initialized
     auto_init_if_empty(*dst, output_shape, 1, src->data_type(), src->quantization_info());
-
-    const auto *uk = get_implementation_logits_max(SoftmaxSelectorData{ src->data_type(), CPUInfo::get() });
+    const auto *uk = get_implementation(DataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa() });
     ARM_COMPUTE_ERROR_ON_NULLPTR(uk);
-
     _run_method = uk->ukernel;
     _name       = std::string("CpuLogits1DMaxKernel").append("/").append(uk->name);
-
-    Window win = calculate_max_window(*src, Steps());
+    Window win  = calculate_max_window(*src, Steps());
     ICpuKernel::configure(win);
 }
-
 Status CpuLogits1DMaxKernel::validate(const ITensorInfo *src, const ITensorInfo *dst)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, dst);
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_logits_1d_max(*src, *dst));
-
     return Status{};
 }
-
 void CpuLogits1DMaxKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
 {
     ARM_COMPUTE_UNUSED(info);
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
     ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
     ARM_COMPUTE_ERROR_ON(_run_method == nullptr);
-
     const auto src = tensors.get_const_tensor(TensorType::ACL_SRC);
     auto       dst = tensors.get_tensor(TensorType::ACL_DST);
-
     _run_method(src, dst, window);
 }
-
 const char *CpuLogits1DMaxKernel::name() const
 {
     return _name.c_str();
 }
 
+/* Softmax Logits 1D - computes the softmax over each 1D row, given the pre-computed row maxima */
+template <bool IS_LOG>
+static const std::vector<typename CpuLogits1DSoftmaxKernel<IS_LOG>::SoftmaxLogits1DKernel> available_kernels_logits =
+{
+#if defined(ARM_COMPUTE_ENABLE_SVE)
+    {
+        "sve_fp32_softmax_logits_1d",
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F32) && data.isa.sve; },
+        REGISTER_FP32_SVE(arm_compute::cpu::sve_fp32_softmax)
+    },
+    {
+        "sve_fp16_softmax_logits_1d",
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16) && data.isa.sve; },
+        REGISTER_FP16_SVE(arm_compute::cpu::sve_fp16_softmax)
+    },
+#endif /* defined(ARM_COMPUTE_ENABLE_SVE) */
+#if defined(ARM_COMPUTE_ENABLE_NEON)
+    {
+        "neon_fp32_softmax_logits_1d",
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F32); },
+        REGISTER_FP32_NEON(arm_compute::cpu::neon_fp32_softmax)
+    },
+#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
+    {
+        "neon_fp16_softmax_logits_1d",
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::F16); },
+        REGISTER_FP16_NEON(arm_compute::cpu::neon_fp16_softmax)
+    },
+#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) */
+#endif /* defined(ARM_COMPUTE_ENABLE_NEON) */
+#if defined(ARM_COMPUTE_ENABLE_SVE2)
+    {
+        "sve2_qu8_softmax_logits_1d",
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8) && data.isa.sve2; },
+        REGISTER_QASYMM8_SVE2(arm_compute::cpu::sve2_qasymm8_softmax)
+    },
+    {
+        "sve2_qs8_softmax_logits_1d",
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED) && data.isa.sve2; },
+        REGISTER_QASYMM8_SIGNED_SVE2(arm_compute::cpu::sve2_qasymm8_signed_softmax)
+    },
+#endif /* defined(ARM_COMPUTE_ENABLE_SVE2) */
+#if defined(ARM_COMPUTE_ENABLE_NEON)
+    {
+        "neon_qu8_softmax_logits_1d",
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8); },
+        REGISTER_QASYMM8_NEON(arm_compute::cpu::neon_qasymm8_softmax)
+    },
+    {
+        "neon_qs8_softmax_logits_1d",
+        [](const DataTypeISASelectorData & data) { return (data.dt == DataType::QASYMM8_SIGNED); },
+        REGISTER_QASYMM8_SIGNED_NEON(arm_compute::cpu::neon_qasymm8_signed_softmax)
+    },
+#endif //defined(ARM_COMPUTE_ENABLE_NEON)
+};
 namespace
 {
 Status validate_arguments_logits_softmax(const ITensorInfo &src, const ITensorInfo &max,
@@ -270,14 +209,11 @@
     // Check input
     ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&src);
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&src, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::F16, DataType::F32);
-
     const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(src.data_type());
-
     // Check max
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&src, &max);
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(TensorShape(src.tensor_shape()).set(0, 1), max.tensor_shape());
     ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_QUANTIZATION_INFO(&src, &max);
-
     // Check output if configured
     if(dst.total_size() != 0)
     {
@@ -286,7 +222,6 @@
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &dst);
         ARM_COMPUTE_RETURN_ERROR_ON(dst.quantization_info() != output_quantization);
     }
-
     // Check tmp if configured
     if(tmp.total_size() != 0)
     {
@@ -296,84 +231,69 @@
         // on the maximum number of threads that will run in parallel.
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(&src, &tmp);
     }
-
     return Status{};
 }
 } // namespace
-
+template <bool IS_LOG>
+const std::vector<typename CpuLogits1DSoftmaxKernel<IS_LOG>::SoftmaxLogits1DKernel> &CpuLogits1DSoftmaxKernel<IS_LOG>::get_available_kernels()
+{
+    return available_kernels_logits<IS_LOG>;
+}
 template <bool IS_LOG>
 void CpuLogits1DSoftmaxKernel<IS_LOG>::configure(const ITensorInfo *src, const ITensorInfo *max, ITensorInfo *dst, const float beta, ITensorInfo *tmp)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, max, dst, tmp);
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_logits_softmax(*src, *max, *dst, beta, *tmp, IS_LOG));
-
     // Configure kernel window
     const bool is_quantized_asymmetric = is_data_type_quantized_asymmetric(src->data_type());
-
     // Output auto initialization if not yet initialized
     const QuantizationInfo output_quantization = is_quantized_asymmetric ? arm_compute::get_softmax_output_quantization_info(src->data_type(), IS_LOG) : dst->quantization_info();
     auto_init_if_empty(*dst, TensorInfo(*src).set_quantization_info(output_quantization).reset_padding());
-
     // Tmp auto initialization if not yet initialized
     const DataType tmp_data_type = is_quantized_asymmetric ? DataType::F32 : src->data_type();
     auto_init_if_empty(*tmp, TensorInfo(*src).set_data_type(tmp_data_type).reset_padding());
-
-    const auto *uk = get_implementation_logits(SoftmaxSelectorData{ src->data_type(), CPUInfo::get() });
+    const auto *uk = CpuLogits1DSoftmaxKernel<IS_LOG>::get_implementation(DataTypeISASelectorData{ src->data_type(), CPUInfo::get().get_isa() });
     ARM_COMPUTE_ERROR_ON_NULLPTR(uk);
-
     std::string kernel_name = IS_LOG ? std::string("CpuLogits1DLogSoftmaxKernel") : std::string("CpuLogits1DSoftmaxKernel");
-
-    _beta       = beta;
-    _run_method = uk->ukernel;
-    _name       = kernel_name.append("/").append(uk->name);
-
+    _beta                   = beta;
+    _run_method             = uk->ukernel;
+    _name                   = kernel_name.append("/").append(uk->name);
     // Configure kernel window
     Window win = calculate_max_window(*max, Steps());
-
-    ICpuKernel::configure(win);
+    ICPPKernel::configure(win);
 }
-
 template <bool IS_LOG>
 Status CpuLogits1DSoftmaxKernel<IS_LOG>::validate(const ITensorInfo *src, const ITensorInfo *max,
                                                   const ITensorInfo *dst, const float beta, const ITensorInfo *tmp)
 {
     ARM_COMPUTE_ERROR_ON_NULLPTR(src, max, dst, tmp);
     ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_logits_softmax(*src, *max, *dst, beta, *tmp, IS_LOG));
-
     return Status{};
 }
-
 template <bool IS_LOG>
 void CpuLogits1DSoftmaxKernel<IS_LOG>::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
 {
     ARM_COMPUTE_UNUSED(info);
     ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
-    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICpuKernel::window(), window);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(ICPPKernel::window(), window);
     ARM_COMPUTE_ERROR_ON(_run_method == nullptr);
-
-    const auto src = tensors.get_const_tensor(TensorType::ACL_SRC_0);
-    auto       max = tensors.get_tensor(TensorType::ACL_SRC_1);
-    auto       dst = tensors.get_tensor(TensorType::ACL_DST_0);
-    auto       tmp = tensors.get_tensor(TensorType::ACL_DST_1);
-
+    const auto         src                               = tensors.get_const_tensor(TensorType::ACL_SRC_0);
+    auto               max                               = tensors.get_tensor(TensorType::ACL_SRC_1);
+    auto               dst                               = tensors.get_tensor(TensorType::ACL_DST_0);
+    auto               tmp                               = tensors.get_tensor(TensorType::ACL_DST_1);
     const unsigned int num_elems_processed_per_iteration = src->info()->valid_region().shape.x();
     const unsigned int tmp_size_for_thread               = tmp->info()->element_size() * num_elems_processed_per_iteration;
-
     ARM_COMPUTE_ERROR_ON(tmp->info()->total_size() < (info.num_threads * tmp_size_for_thread));
-
     void *tmp_for_thread = tmp->buffer() + (info.thread_id * tmp_size_for_thread);
     _run_method(src, max, tmp_for_thread, dst, _beta, IS_LOG, window);
 }
-
 template <bool IS_LOG>
 const char    *CpuLogits1DSoftmaxKernel<IS_LOG>::name() const
 {
     return _name.c_str();
 }
-
 template class CpuLogits1DSoftmaxKernel<true>;
 template class CpuLogits1DSoftmaxKernel<false>;
-
 } // namespace kernels
 } // namespace cpu
 } // namespace arm_compute
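
For readers new to the table-driven dispatch used above, here is a
minimal standalone sketch of the pattern, with simplified names and
signatures that are not the library's actual types:

    #include <vector>

    enum class DataType { F32, F16, QASYMM8, QASYMM8_SIGNED };

    struct IsaInfo  { bool sve = false; bool sve2 = false; };
    struct Selector { DataType dt; IsaInfo isa; };

    struct MicroKernel
    {
        const char *name;
        bool (*is_selected)(const Selector &);
        void (*ukernel)(); // stand-in for the real kernel signature
    };

    // First match wins, so the table is ordered from most to least
    // specialised implementation (SVE2 before SVE before NEON).
    inline const MicroKernel *get_implementation(const std::vector<MicroKernel> &table, const Selector &data)
    {
        for(const auto &uk : table)
        {
            if(uk.is_selected(data))
            {
                return &uk;
            }
        }
        return nullptr;
    }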