COMPMID-661: softmax-fp32 optimisation (#14)

Change-Id: I2007af1ed9dcf68065cf412aa50f73a2025b31a6
Reviewed-on: http://mpd-gerrit.cambridge.arm.com/94605
Reviewed-by: Gian Marco Iodice <gianmarco.iodice@arm.com>
Tested-by: Kaizen <jeremy.johnson+kaizengerrit@arm.com>
diff --git a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
index 1e079cb..675c462 100644
--- a/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLSoftmaxLayerKernel.h
@@ -26,6 +26,8 @@
 
 #include "arm_compute/core/CL/ICLSimple3DKernel.h"
 
+#include <tuple>
+
 namespace arm_compute
 {
 class ICLTensor;
@@ -42,7 +44,7 @@
     void configure(const ICLTensor *input, ICLTensor *output);
 };
 
-/** Interface for shifting the logits values around the max value and exponentiating the result */
+/** Interface for shifting, exponentiating and summing the logits */
 class CLLogits1DShiftExpSumKernel : public ICLKernel
 {
 public:
@@ -60,9 +62,9 @@
      *
      * @param[in]  input  Source tensor. Data types supported: QS8/QS16/F16/F32
      * @param[in]  max    Max values tensor. Data types supported: same as @p input
-     * @param[in]  beta   A scaling factor for the exponent.
      * @param[out] output Destination tensor. Data types supported: same as @p input
      * @param[out] sum    Sum of 1D logits tensor. Data types supported: same as @p input
+     * @param[in]  beta   (Optional) A scaling factor for the exponent. Defaults to 1.f
      */
     void configure(const ICLTensor *input, const ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta = 1.0f);
 
@@ -76,6 +78,58 @@
     ICLTensor       *_sum;
 };
 
+/** Interface for computing the max, then shifting, exponentiating and summing the logits */
+class CLLogits1DMaxShiftExpSumKernel : public ICLKernel
+{
+public:
+    using ParallelReductionInfo = std::tuple<bool, unsigned int>;
+
+public:
+    /** Default constructor */
+    CLLogits1DMaxShiftExpSumKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLLogits1DMaxShiftExpSumKernel(const CLLogits1DMaxShiftExpSumKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLLogits1DMaxShiftExpSumKernel &operator=(const CLLogits1DMaxShiftExpSumKernel &) = delete;
+    /** Allow instances of this class to be moved */
+    CLLogits1DMaxShiftExpSumKernel(CLLogits1DMaxShiftExpSumKernel &&) = default;
+    /** Allow instances of this class to be moved */
+    CLLogits1DMaxShiftExpSumKernel &operator=(CLLogits1DMaxShiftExpSumKernel &&) = default;
+    /** Set the input and output tensors.
+     *
+     * @param[in]     input  Source tensor. Data types supported: QS8/QS16/F16/F32
+     * @param[in,out] max    Max values tensor. Data types supported: same as @p input
+     * @param[out]    output Destination tensor. Data types supported: same as @p input
+     * @param[out]    sum    Sum of 1D logits tensor. Data types supported: same as @p input
+     * @param[in]     beta   (Optional) A scaling factor for the exponent. Defaults to 1.f
+     */
+    void configure(const ICLTensor *input, ICLTensor *max, ICLTensor *output, ICLTensor *sum, float beta = 1.0f);
+    /** Checks if the given size is eligible for parallel reduction
+     *
+     * @note  Serial reduction is launched for width < (_grid_size * _serial_vector_size).
+     * @note  Parallel reduction is launched for width >= (_grid_size * _serial_vector_size) and vector_size is forced to 4.
+     *
+     * @param[in] size Size to check
+     *
+     * @return A two-element tuple where the first element is a boolean specifying whether a parallel reduction will be run,
+     *         while the second element is the vector size of the execution.
+     */
+    static ParallelReductionInfo is_parallel_reduction(size_t size);
+
+    // Inherited methods overridden:
+    void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+    const ICLTensor *_input;
+    ICLTensor       *_max;
+    ICLTensor       *_output;
+    ICLTensor       *_sum;
+
+private:
+    static const unsigned int _grid_size;
+    static const unsigned int _serial_vector_size;
+    static const unsigned int _parallel_vector_size;
+};
 /** Interface for calculating the final step of the Softmax Layer where each logit value is multiplied by the inverse of the sum of the logits. */
 class CLLogits1DNormKernel : public ICLKernel
 {
diff --git a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
index d84297e..72ef679 100644
--- a/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
+++ b/arm_compute/runtime/CL/functions/CLSoftmaxLayer.h
@@ -54,8 +54,8 @@
     /** Set the input and output tensors.
      *
      * @param[in]  input  Source tensor. Data types supported: QS8/QS16/F16/F32
-     * @param[in]  beta   A scaling factor for the exponent.
      * @param[out] output Destination tensor. Data types supported: same as @p input
+     * @param[in]  beta   (Optional) A scaling factor for the exponent. Defaults to 1.f
      */
     void configure(const ICLTensor *input, ICLTensor *output, float beta = 1.0f);
 
@@ -63,13 +63,15 @@
     void run() override;
 
 private:
-    CLMemoryGroup               _memory_group;
-    CLLogits1DMaxKernel         _max_kernel;
-    CLLogits1DShiftExpSumKernel _shift_exp_sum_kernel;
-    CLLogits1DNormKernel        _norm_kernel;
-    CLTensor                    _max;
-    CLTensor                    _sum;
-    CLTensor                    _tmp;
+    CLMemoryGroup                  _memory_group;
+    CLLogits1DMaxKernel            _max_kernel;
+    CLLogits1DShiftExpSumKernel    _shift_exp_sum_kernel;
+    CLLogits1DMaxShiftExpSumKernel _max_shift_exp_sum_kernel;
+    CLLogits1DNormKernel           _norm_kernel;
+    CLTensor                       _max;
+    CLTensor                       _sum;
+    CLTensor                       _tmp;
+    bool                           _run_legacy_path;
 };
 }
 #endif /* __ARM_COMPUTE_CLSOFTMAXLAYER_H__ */