IVGCVSW-631 Neon support for Softmax beta parameter (F32 only)

Change-Id: Ibf6f038b39f1a4e557f5d04feb08e3d5ef54e223
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/112019
Tested-by: BSG Visual Compute Jenkins server on http://mpd-gerrit.cambridge.arm.com <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
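
Beta scales the logits before exponentiation. Assuming the usual max-shifted formulation (which the shift-exp-sum kernel below uses for numerical stability), the computation is

    softmax(x_i) = exp(beta * (x_i - max_j x_j)) / sum_k exp(beta * (x_k - max_j x_j))

With beta = 1.0f this reduces to the standard softmax, hence the default value.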
diff --git a/arm_compute/core/Helpers.h b/arm_compute/core/Helpers.h
index 1be24e1..fdbb46f 100644
--- a/arm_compute/core/Helpers.h
+++ b/arm_compute/core/Helpers.h
@@ -501,7 +501,7 @@
  *
  * @return True if the tensor info has been initialized
  */
-bool auto_init_if_empty(ITensorInfo &info_sink, ITensorInfo &info_source);
+bool auto_init_if_empty(ITensorInfo &info_sink, const ITensorInfo &info_source);
 
 /** Set the shape to the specified value if the current assignment is empty.
  *
diff --git a/arm_compute/core/Helpers.inl b/arm_compute/core/Helpers.inl
index 1e56534..3672692 100644
--- a/arm_compute/core/Helpers.inl
+++ b/arm_compute/core/Helpers.inl
@@ -217,7 +217,7 @@
     return false;
 }
 
-inline bool auto_init_if_empty(ITensorInfo &info_sink, ITensorInfo &info_source)
+inline bool auto_init_if_empty(ITensorInfo &info_sink, const ITensorInfo &info_source)
 {
     if(info_sink.tensor_shape().total_size() == 0)
     {
diff --git a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
index cce2156..c3e2518 100644
--- a/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NESoftmaxLayerKernel.h
@@ -78,14 +78,15 @@
      * @param[in]  max    Max values tensor. Data types supported: same as @p input.
      * @param[out] output Destination tensor. Data types supported: same as @p input.
      * @param[out] sum    Sum of 1D logits tensor. Data types supported: same as @p input.
+     * @param[in]  beta   (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
      */
-    void configure(const ITensor *input, const ITensor *max, ITensor *output, ITensor *sum);
+    void configure(const ITensor *input, const ITensor *max, ITensor *output, ITensor *sum, float beta = 1.0f);
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
 
 private:
-    using Logits1DShiftExpSumFunction = void(const ITensor *in, const ITensor *max, ITensor *out, ITensor *sum, const Window &window);
+    using Logits1DShiftExpSumFunction = void(const ITensor *in, const ITensor *max, ITensor *out, ITensor *sum, const Window &window, float beta);
 
 private:
     Logits1DShiftExpSumFunction *_func;
@@ -93,6 +94,7 @@
     const ITensor               *_max;
     ITensor                     *_output;
     ITensor                     *_sum;
+    float                        _beta;
 };
 
 /** Interface for calculating the final step of the Softmax Layer where each logit value is multiplied by the inverse of the sum of the logits. */
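
A minimal scalar sketch of the beta-scaled shift-exp-sum step documented above (the real F32 kernel is vectorized with Neon intrinsics; in, max_val, out, sum and num_elems are illustrative placeholders, std::exp is from <cmath>):

    // Shift each logit by the row maximum, scale by beta, exponentiate,
    // and accumulate the sum later used by the normalization kernel.
    float sum = 0.f;
    for(int i = 0; i < num_elems; ++i)
    {
        out[i] = std::exp((in[i] - max_val) * beta);
        sum   += out[i];
    }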
diff --git a/arm_compute/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.h b/arm_compute/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.h
index 19bfb83..e7f8d50 100644
--- a/arm_compute/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.h
+++ b/arm_compute/runtime/GLES_COMPUTE/functions/GCSoftmaxLayer.h
@@ -51,8 +51,9 @@
      *
      * @param[in]  input  Source tensor. Data types supported: F16/F32
      * @param[out] output Destination tensor. Data types supported: same as @p input
+     * @param[in]  beta   (Optional) A scaling factor for the exponent. Only beta = 1 is supported.
      */
-    void configure(const IGCTensor *input, IGCTensor *output);
+    void configure(const IGCTensor *input, IGCTensor *output, float beta = 1.0f);
 
     // Inherited methods overridden:
     void run() override;
diff --git a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
index a265f70..38a0f21 100644
--- a/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
+++ b/arm_compute/runtime/NEON/functions/NESoftmaxLayer.h
@@ -53,8 +53,9 @@
      *
      * @param[in]  input  Source tensor. Data types supported: QS8/QS16/F16/F32.
      * @param[out] output Destination tensor. Data types supported: same as @p input.
+     * @param[in]  beta   (Optional) A scaling factor for the exponent. QS8/QS16 only support a beta value of 1.
      */
-    void configure(ITensor *input, ITensor *output);
+    void configure(ITensor *input, ITensor *output, float beta = 1.0f);
 
     // Inherited methods overridden:
     void run() override;
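
A minimal end-to-end usage sketch of the new parameter (the shape and beta value are illustrative, not taken from this patch):

    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/NEON/functions/NESoftmaxLayer.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    int main()
    {
        // 2D input: 128 logits per row, 32 rows; beta != 1 requires F32.
        Tensor input, output;
        input.allocator()->init(TensorInfo(TensorShape(128U, 32U), 1, DataType::F32));
        // The output's info is inferred from the input at configure time
        // (see auto_init_if_empty above), so it needs no explicit init.

        NESoftmaxLayer softmax;
        softmax.configure(&input, &output, 2.0f); // beta = 2.0f, F32 only

        input.allocator()->allocate();
        output.allocator()->allocate();
        // ... fill input ...
        softmax.run();
        return 0;
    }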