COMPMID-2927: Add support for mixed precision in CLInstanceNormalizationLayer

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I91482e2e4b723606aef76afef09a8277813e5d1b
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2668
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
diff --git a/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
index cf726d8..9982cc2 100644
--- a/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019 ARM Limited.
+ * Copyright (c) 2019-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -26,8 +26,11 @@
 
 #include "arm_compute/core/CL/ICLKernel.h"
 
+#include "arm_compute/core/KernelDescriptors.h"
+
 namespace arm_compute
 {
+// Forward declarations
 class ICLTensor;
 
 /** Interface for performing an instance normalization */
@@ -49,26 +52,22 @@
 
     /** Set the input and output tensors.
      *
-     * @param[in, out] input   Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
-     *                         In case of @p output tensor = nullptr this tensor will store the result of the normalization.
-     * @param[out]     output  Destination tensor. Data types and data layouts supported: same as @p input.
-     * @param[in]      gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
-     * @param[in]      beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
-     * @param[in]      epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12
+     * @param[in, out] input  Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
+     *                        In case of @p output tensor = nullptr this tensor will store the result of the normalization.
+     * @param[out]     output Destination tensor. Data types and data layouts supported: same as @p input.
+     * @param[in]      info   Kernel meta-data descriptor.
      */
-    void configure(ICLTensor *input, ICLTensor *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f);
+    void configure(ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info);
 
     /** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer.
      *
-     * @param[in] input   Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
-     * @param[in] output  Destination tensor info. Data types and data layouts supported: same as @p input.
-     * @param[in] gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
-     * @param[in] beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
-     * @param[in] epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12
+     * @param[in] input  Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
+     * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
+     * @param[in] info   Kernel meta-data descriptor.
      *
      * @return a status
      */
-    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f);
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const InstanceNormalizationLayerKernelInfo &info);
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
@@ -76,9 +75,6 @@
 private:
     ICLTensor *_input;
     ICLTensor *_output;
-    float      _gamma;
-    float      _beta;
-    float      _epsilon;
     bool       _run_in_place;
 };
 } // namespace arm_compute
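For illustration, a minimal sketch of how a caller migrates to the new kernel
interface. This is an assumption-laden example, not part of the commit: input
and output stand for previously initialized ICLTensor objects, and the
descriptor type is the one added in the KernelDescriptors.h hunk below.

    // Before this change: loose scalar parameters with defaults.
    // kernel.configure(&input, &output, /*gamma=*/1.0f, /*beta=*/0.0f, /*epsilon=*/1e-12f);

    // After this change: the scalars are bundled in a descriptor, which also
    // carries the new use_mixed_precision flag.
    CLInstanceNormalizationLayerKernel kernel;
    kernel.configure(&input, &output,
                     InstanceNormalizationLayerKernelInfo(1.0f, 0.0f, 1e-12f, /*use_mixed_precision=*/true));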
diff --git a/arm_compute/core/KernelDescriptors.h b/arm_compute/core/KernelDescriptors.h
index d009ccc..4b04beb 100644
--- a/arm_compute/core/KernelDescriptors.h
+++ b/arm_compute/core/KernelDescriptors.h
@@ -92,5 +92,31 @@
     int32_t  result_offset_after_shift{ 0 };        /**< Result offset used for quantizing */
     DataType output_data_type{ DataType::UNKNOWN }; /**< Output tensor data type to use if the output is not initialized */
 };
+
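+/** Descriptor used by the instance normalization kernels */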
+struct InstanceNormalizationLayerKernelInfo
+{
+    /** Default constructor */
+    InstanceNormalizationLayerKernelInfo()
+        : InstanceNormalizationLayerKernelInfo(1.f, 0.f, 1e-12, true)
+    {
+    }
+    /** Constructor
+     *
+     * @param[in] gamma               The scale scalar value applied to the normalized tensor.
+     * @param[in] beta                The offset scalar value applied to the normalized tensor.
+     * @param[in] epsilon             Lower bound value for the normalization.
+     * @param[in] use_mixed_precision Use mixed precision in case of FP16 execution.
+     */
+    InstanceNormalizationLayerKernelInfo(float gamma, float beta, float epsilon, bool use_mixed_precision)
+        : gamma(gamma), beta(beta), epsilon(epsilon), use_mixed_precision(use_mixed_precision)
+    {
+    }
+
+    float gamma;               /**< The scale scalar value applied to the normalized tensor. Defaults to 1.0 */
+    float beta;                /**< The offset scalar value applied to the normalized tensor. Defaults to 0.0 */
+    float epsilon;             /**< Lower bound value for the normalization. Defaults to 1e-12 */
+    bool  use_mixed_precision; /**< Use mixed precision in case of FP16 execution. Defaults to true */
+};
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_CORE_KERNEL_DESCRIPTORS_H */
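A hedged usage sketch of the new descriptor: mixed precision defaults to
enabled, so FP16 execution presumably accumulates the normalization statistics
at higher precision, while passing false as the last argument requests plain
FP16 arithmetic. The tensor infos in_info/out_info are assumptions, not part
of the commit.

    // Default-constructed info: gamma = 1.0, beta = 0.0, epsilon = 1e-12,
    // use_mixed_precision = true.
    InstanceNormalizationLayerKernelInfo info{};

    // Opting out of mixed precision, e.g. to trade accuracy for FP16 throughput.
    InstanceNormalizationLayerKernelInfo fp16_info(1.0f, 0.0f, 1e-12f, /*use_mixed_precision=*/false);

    // Check that the configuration is supported before configuring the kernel.
    Status status = CLInstanceNormalizationLayerKernel::validate(&in_info, &out_info, fp16_info);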