COMPMID-2927: Add support for mixed precision in
CLInstanceNormalizationLayer

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I91482e2e4b723606aef76afef09a8277813e5d1b
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/2668
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
diff --git a/arm_compute/core/KernelDescriptors.h b/arm_compute/core/KernelDescriptors.h
index d009ccc..4b04beb 100644
--- a/arm_compute/core/KernelDescriptors.h
+++ b/arm_compute/core/KernelDescriptors.h
@@ -92,5 +92,30 @@
     int32_t  result_offset_after_shift{ 0 };        /**< Result offset used for quantizing */
     DataType output_data_type{ DataType::UNKNOWN }; /**< Output tensor data type to use if the output is not initialized */
 };
+
+struct InstanceNormalizationLayerKernelInfo
+{
+    /** Default constructor: gamma = 1.f, beta = 0.f, epsilon = 1e-12, use_mixed_precision = true */
+    InstanceNormalizationLayerKernelInfo()
+        : InstanceNormalizationLayerKernelInfo(1.f, 0.f, 1e-12, true)
+    {
+    }
+    /** Constructor
+     *
+     * @param[in] gamma               The scale scalar value applied to the normalized tensor.
+     * @param[in] beta                The offset scalar value applied to the normalized tensor.
+     * @param[in] epsilon             Lower bound value for the normalization.
+     * @param[in] use_mixed_precision Use mixed precision in case of FP16 execution.
+     */
+    InstanceNormalizationLayerKernelInfo(float gamma, float beta, float epsilon, bool use_mixed_precision)
+        : gamma(gamma), beta(beta), epsilon(epsilon), use_mixed_precision(use_mixed_precision)
+    {
+    }
+
+    float gamma;               /**< The scale scalar value applied to the normalized tensor. Defaults to 1.0 */
+    float beta;                /**< The offset scalar value applied to the normalized tensor. Defaults to 0.0 */
+    float epsilon;             /**< Lower bound value for the normalization. Defaults to 1e-12 */
+    bool  use_mixed_precision; /**< Use mixed precision in case of FP16 execution. Defaults to true */
+};
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_CORE_KERNEL_DESCRIPTORS_H */