COMPMID-2313: Implement CL INSTANCE_NORMALIZATION function

Change-Id: If11799bef1bbb973d4287ffc1c6eb4c2a28bbf5f
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/1989
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
diff --git a/arm_compute/core/CL/CLKernels.h b/arm_compute/core/CL/CLKernels.h
index e298247..3d9b2c8 100644
--- a/arm_compute/core/CL/CLKernels.h
+++ b/arm_compute/core/CL/CLKernels.h
@@ -106,6 +106,7 @@
 #include "arm_compute/core/CL/kernels/CLHeightConcatenateLayerKernel.h"
 #include "arm_compute/core/CL/kernels/CLHistogramKernel.h"
 #include "arm_compute/core/CL/kernels/CLIm2ColKernel.h"
+#include "arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h"
 #include "arm_compute/core/CL/kernels/CLIntegralImageKernel.h"
 #include "arm_compute/core/CL/kernels/CLL2NormalizeLayerKernel.h"
 #include "arm_compute/core/CL/kernels/CLLKTrackerKernel.h"
diff --git a/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
new file mode 100644
index 0000000..bc016d1
--- /dev/null
+++ b/arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYERKERNEL_H__
+#define __ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYERKERNEL_H__
+
+#include "arm_compute/core/CL/ICLKernel.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Interface for performing instance normalization */
+class CLInstanceNormalizationLayerKernel : public ICLKernel
+{
+public:
+    /** Constructor */
+    CLInstanceNormalizationLayerKernel();
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLInstanceNormalizationLayerKernel(const CLInstanceNormalizationLayerKernel &) = delete;
+    /** Prevent instances of this class from being copied (As this class contains pointers) */
+    CLInstanceNormalizationLayerKernel &operator=(const CLInstanceNormalizationLayerKernel &) = delete;
+    /** Default move constructor */
+    CLInstanceNormalizationLayerKernel(CLInstanceNormalizationLayerKernel &&) = default;
+    /** Default move assignment operator */
+    CLInstanceNormalizationLayerKernel &operator=(CLInstanceNormalizationLayerKernel &&) = default;
+    /** Default destructor */
+    ~CLInstanceNormalizationLayerKernel() = default;
+
+    /** Set the input and output tensors.
+     *
+     * @param[in, out] input   Source tensor. In case of @p output tensor = nullptr this tensor will store the result of the normalization. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
+     * @param[out]     output  Destination tensor. Data types and data layouts supported: same as @p input.
+     * @param[in]      gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
+     * @param[in]      beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
+     * @param[in]      epsilon (Optional) Small float added to the variance to avoid a division by zero. Defaults to 1e-12
+     */
+    void configure(ICLTensor *input, ICLTensor *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayerKernel.
+     *
+     * @param[in] input   Source tensor info. In case of @p output tensor = nullptr this tensor will store the result of the normalization.
+     *                    Data types supported: F16/F32. Data layout supported: NHWC, NCHW
+     * @param[in] output  Destination tensor info. Data types and data layouts supported: same as @p input.
+     * @param[in] gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
+     * @param[in] beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
+     * @param[in]      epsilon (Optional) Small float added to the variance to avoid a division by zero. Defaults to 1e-12
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f);
+
+    // Inherited methods overridden:
+    void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+    ICLTensor *_input;
+    ICLTensor *_output;
+    float      _gamma;
+    float      _beta;
+    float      _epsilon;
+    bool       _run_in_place;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYERKERNEL_H__ */
diff --git a/arm_compute/runtime/CL/CLFunctions.h b/arm_compute/runtime/CL/CLFunctions.h
index 007ee70..e647819 100644
--- a/arm_compute/runtime/CL/CLFunctions.h
+++ b/arm_compute/runtime/CL/CLFunctions.h
@@ -90,6 +90,7 @@
 #include "arm_compute/runtime/CL/functions/CLHOGMultiDetection.h"
 #include "arm_compute/runtime/CL/functions/CLHarrisCorners.h"
 #include "arm_compute/runtime/CL/functions/CLHistogram.h"
+#include "arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h"
 #include "arm_compute/runtime/CL/functions/CLIntegralImage.h"
 #include "arm_compute/runtime/CL/functions/CLL2NormalizeLayer.h"
 #include "arm_compute/runtime/CL/functions/CLLSTMLayer.h"
diff --git a/arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h b/arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h
new file mode 100644
index 0000000..e13826a
--- /dev/null
+++ b/arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYER_H__
+#define __ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYER_H__
+
+#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
+
+namespace arm_compute
+{
+class ICLTensor;
+
+/** Basic function to perform instance normalization.
+ *
+ * This function runs the following kernels:
+ * -# @ref CLInstanceNormalizationLayerKernel
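+ *
+ * Illustrative usage sketch (tensor creation, allocation and filling omitted):
+ * @code
+ * CLInstanceNormalizationLayer norm;
+ * norm.configure(&src, &dst, 1.f, 0.f, 1e-12f); // gamma, beta, epsilon
+ * norm.run();
+ * @endcode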
+ */
+class CLInstanceNormalizationLayer : public ICLSimpleFunction
+{
+public:
+    /** Default constructor */
+    CLInstanceNormalizationLayer();
+    /** Set the input and output tensors.
+     *
+     * @param[in, out] input   Source tensor. In case of @p output tensor = nullptr this tensor will store the result of the normalization.
+     *                         Data types supported: F16/F32. Data layout supported: NHWC, NCHW
+     * @param[out]     output  Destination tensor. Data types and data layouts supported: same as @p input.
+     * @param[in]      gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
+     * @param[in]      beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
+     * @param[in]      epsilon (Optional) Small float added to the variance to avoid a division by zero. Defaults to 1e-12
+     */
+    void configure(ICLTensor *input, ICLTensor *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f);
+
+    /** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer.
+     *
+     * @param[in] input   Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
+     * @param[in] output  Destination tensor info. Data types and data layouts supported: same as @p input.
+     * @param[in] gamma   (Optional) The scale scalar value applied to the normalized tensor. Defaults to 1.0
+     * @param[in] beta    (Optional) The offset scalar value applied to the normalized tensor. Defaults to 0.0
+     * @param[in] epsilon (Optional) Small float added to the variance to avoid a division by zero. Defaults to 1e-12
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output, float gamma = 1.0f, float beta = 0.0f, float epsilon = 1e-12f);
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYER_H__ */
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index 978e35f..fa5193f 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -371,6 +371,7 @@
     { "init_level", "optical_flow_pyramid_lk.cl" },
     { "init_level_max", "optical_flow_pyramid_lk.cl" },
     { "init_level_max_initial_estimate", "optical_flow_pyramid_lk.cl" },
+    { "instance_normalization", "instance_normalization.cl" },
     { "integral_horizontal", "integral_image.cl" },
     { "integral_vertical", "integral_image.cl" },
     { "IYUV_to_NV12_bt709", "color_convert.cl" },
@@ -823,6 +824,10 @@
 #include "./cl_kernels/im2col.clembed"
     },
     {
+        "instance_normalization.cl",
+#include "./cl_kernels/instance_normalization.clembed"
+    },
+    {
         "integral_image.cl",
 #include "./cl_kernels/integral_image.clembed"
     },
diff --git a/src/core/CL/cl_kernels/instance_normalization.cl b/src/core/CL/cl_kernels/instance_normalization.cl
new file mode 100644
index 0000000..699597e
--- /dev/null
+++ b/src/core/CL/cl_kernels/instance_normalization.cl
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "helpers.h"
+
+#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(GAMMA) && defined(BETA) && defined(EPSILON) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z)
+/** This function normalizes each spatial (W, H) plane of the input tensor using the mean and standard deviation of that plane (instance normalization).
+ *
+ * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @attention Data type should be passed using the -DDATA_TYPE=data_type compile flag, e.g. -DDATA_TYPE=float
+ * @attention The scale scalar value applied to the normalized tensor should be passed using the -DGAMMA=value compile flag, e.g. -DGAMMA=1.3
+ * @attention The offset scalar value applied to the normalized tensor should be passed using the -DBETA=value compile flag, e.g. -DBETA=2.4
+ * @attention Normalization epsilon parameter should be given as a preprocessor argument with -DEPSILON=value. e.g. -DEPSILON=0.001f
+ * @attention Dimensions X, Y, and Z should be given as preprocessor arguments with -DDIM_X=value, -DDIM_Y=value, -DDIM_Z=value. e.g. -DDIM_X=6, -DDIM_Y=2, -DDIM_Z=7
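+ *
+ * For example, a hypothetical F32 NCHW tensor with W=128, H=64, C=32 could be built with:
+ * -DDATA_TYPE=float -DVEC_SIZE=4 -DDIM_X=128 -DDIM_Y=64 -DDIM_Z=32 -DGAMMA=1.f -DBETA=0.f -DEPSILON=0.001f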
+ *
+ * @param[in]  input_ptr                            Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in]  input_stride_x                       Stride of the source tensor in X dimension (in bytes)
+ * @param[in]  input_step_x                         input_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  input_stride_y                       Stride of the source tensor in Y dimension (in bytes)
+ * @param[in]  input_step_y                         input_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  input_stride_z                       Stride of the source tensor in Z dimension (in bytes)
+ * @param[in]  input_step_z                         input_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  input_stride_w                       Stride of the source tensor in W dimension (in bytes)
+ * @param[in]  input_step_w                         input_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in]  input_offset_first_element_in_bytes  The offset of the first element in the source tensor
+ * @param[out] output_ptr                           (Optional) Pointer to the destination tensor. Supported data types: same as @p input_ptr
+ * @param[in]  output_stride_x                      (Optional) Stride of the destination tensor in X dimension (in bytes)
+ * @param[in]  output_step_x                        (Optional) output_stride_x * number of elements along X processed per workitem(in bytes)
+ * @param[in]  output_stride_y                      (Optional) Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in]  output_step_y                        (Optional) output_stride_y * number of elements along Y processed per workitem(in bytes)
+ * @param[in]  output_stride_z                      (Optional) Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in]  output_step_z                        (Optional) output_stride_z * number of elements along Z processed per workitem(in bytes)
+ * @param[in]  output_stride_w                      (Optional) Stride of the destination tensor in W dimension (in bytes)
+ * @param[in]  output_step_w                        (Optional) output_stride_w * number of elements along W processed per workitem(in bytes)
+ * @param[in]  output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination tensor
+ */
+__kernel void instance_normalization(
+    TENSOR4D_DECLARATION(input)
+#ifndef IN_PLACE
+    ,
+    TENSOR4D_DECLARATION(output)
+#endif /* IN_PLACE */
+)
+{
+    DATA_TYPE sum    = 0.f;
+    DATA_TYPE sum_sq = 0.f;
+
+#if defined(NHWC)
+
+    const int pc             = get_global_id(0);
+    const int pn             = get_global_id(2);
+    const int elements_plane = DIM_Y * DIM_Z;
+    const int elements_x_y   = DIM_X * DIM_Y;
+    const int elements_x_y_z = DIM_X * DIM_Y * DIM_Z;
+
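+    // NHWC buffers are laid out as (C, W, H, N) = (DIM_X, DIM_Y, DIM_Z, batches), so element
+    // (c, w, h, n) lives at linear offset c + w * DIM_X + h * elements_x_y + n * elements_x_y_z;
+    // this work-item is pinned to channel pc and batch pn and walks its W/H plane below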
+    for(int i_w = 0; i_w < DIM_Y; ++i_w)
+    {
+        for(int i_h = 0; i_h < DIM_Z; ++i_h)
+        {
+            DATA_TYPE data = *((__global DATA_TYPE *)input_ptr + pc + i_w * DIM_X + i_h * elements_x_y + pn * elements_x_y_z);
+            sum += data;
+            sum_sq += data * data;
+        }
+    }
+
+#else // !defined(NHWC)
+    const int elements_plane = DIM_X * DIM_Y;
+    const int plane_address  = get_global_id(2) * elements_plane;
+    int       i              = 0;
+
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    part_sum = 0.f;
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    part_sum_sq = 0.f;
+    // Calculate partial sum
+    for(; i <= (elements_plane - VEC_SIZE); i += VEC_SIZE)
+    {
+        // Load data
+        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+        data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_ptr + i + plane_address);
+        part_sum += data;
+        part_sum_sq += data * data;
+    }
+    // Perform reduction
+#if VEC_SIZE > 8
+    part_sum.s01234567 += part_sum.s89abcdef;
+    part_sum_sq.s01234567 += part_sum_sq.s89abcdef;
+#endif // VEC_SIZE > 8
+#if VEC_SIZE > 4
+    part_sum.s0123 += part_sum.s4567;
+    part_sum_sq.s0123 += part_sum_sq.s4567;
+#endif // VEC_SIZE > 4
+#if VEC_SIZE > 2
+    part_sum.s01 += part_sum.s23;
+    part_sum_sq.s01 += part_sum_sq.s23;
+#endif // VEC_SIZE > 2
+    part_sum.s0 += part_sum.s1;
+    part_sum_sq.s0 += part_sum_sq.s1;
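+    // e.g. with VEC_SIZE=8 (F16) the folds above reduce 8 partial lanes to 4, then 2, then a single scalar in part_sum.s0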
+    // Left-overs loop
+    for(; i < elements_plane; ++i)
+    {
+        DATA_TYPE data = *((__global DATA_TYPE *)input_ptr + i + plane_address);
+        part_sum.s0 += data;
+        part_sum_sq.s0 += data * data;
+    }
+
+    sum    = part_sum.s0;
+    sum_sq = part_sum_sq.s0;
+
+#endif // defined(NHWC)
+
+    const float     mean_float   = ((float)sum / elements_plane);
+    const DATA_TYPE mean         = (DATA_TYPE)mean_float;
+    const float     var_float    = ((float)sum_sq / elements_plane) - (mean_float * mean_float);
+    const float     multip_float = GAMMA / sqrt(var_float + EPSILON);
+    const DATA_TYPE multip       = (DATA_TYPE)multip_float;
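+    // Instance normalization: out = GAMMA * (x - mean) / sqrt(var + EPSILON) + BETA; the scale is
+    // prefolded into multip so the per-element work below is one subtract and one multiply-add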
+
+#if defined(NHWC)
+
+    for(int i_w = 0; i_w < DIM_Y; ++i_w)
+    {
+        for(int i_h = 0; i_h < DIM_Z; ++i_h)
+        {
+            __global DATA_TYPE *input_address = (__global DATA_TYPE *)input_ptr + pc + i_w * DIM_X + i_h * elements_x_y + pn * elements_x_y_z;
+#ifdef IN_PLACE
+            __global DATA_TYPE *output_address = input_address;
+#else  /* !IN_PLACE */
+            __global DATA_TYPE *output_address = (__global DATA_TYPE *)output_ptr + pc + i_w * DIM_X + i_h * elements_x_y + pn * elements_x_y_z;
+#endif /* IN_PLACE */
+            *(output_address) = (*(input_address) - mean) * multip + (DATA_TYPE)BETA;
+        }
+    }
+
+#else // !defined(NHWC)
+    i      = 0;
+    for(; i <= (elements_plane - VEC_SIZE); i += VEC_SIZE)
+    {
+        __global DATA_TYPE *input_address  = (__global DATA_TYPE *)input_ptr + i + plane_address;
+#ifdef IN_PLACE
+        __global DATA_TYPE *output_address = input_address;
+#else  /* !IN_PLACE */
+        __global DATA_TYPE *output_address = (__global DATA_TYPE *)output_ptr + i + plane_address;
+#endif /* IN_PLACE */
+
+        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+        data = VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)input_address);
+
+        VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+        res = (data - mean) * multip + (DATA_TYPE)BETA;
+        VSTORE(VEC_SIZE)
+        (res, 0, (__global DATA_TYPE *)output_address);
+    }
+    for(; i < elements_plane; ++i)
+    {
+        __global DATA_TYPE *input_address  = (__global DATA_TYPE *)input_ptr + i + plane_address;
+#ifdef IN_PLACE
+        __global DATA_TYPE *output_address = input_address;
+#else  /* !IN_PLACE */
+        __global DATA_TYPE *output_address = (__global DATA_TYPE *)output_ptr + i + plane_address;
+#endif /* IN_PLACE */
+        *(output_address)                  = (*(input_address) - mean) * multip + (DATA_TYPE)BETA;
+    }
+#endif // defined(NHWC)
+}
+#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) && defined(GAMMA) && defined(BETA) && defined(EPSILON) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z) */
diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
new file mode 100644
index 0000000..a03322b
--- /dev/null
+++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h"
+
+#include "arm_compute/core/CL/CLHelpers.h"
+#include "arm_compute/core/CL/CLKernelLibrary.h"
+#include "arm_compute/core/CL/CLValidate.h"
+#include "arm_compute/core/CL/ICLTensor.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Window.h"
+
+#include "support/ToolchainSupport.h"
+
+namespace arm_compute
+{
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, float gamma, float beta, float epsilon)
+{
+    ARM_COMPUTE_UNUSED(gamma);
+    ARM_COMPUTE_UNUSED(beta);
+    ARM_COMPUTE_RETURN_ERROR_ON_MSG(epsilon == 0.f, "Epsilon must not be 0");
+
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
+
+    if(output != nullptr && output->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+    }
+
+    return Status{};
+}
+
+std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
+{
+    // We handle the planes manually
+    Window win = calculate_max_window(*input, Steps(1));
+
+    // Output auto initialization if not yet initialized
+    auto_init_if_empty(*output, input->tensor_shape(), 1, input->data_type());
+
+    // CLInstanceNormalizationLayerKernel doesn't need padding so update_window_and_padding() can be skipped
+    Coordinates coord;
+    coord.set_num_dimensions(output->num_dimensions());
+    output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
+    return std::make_tuple(Status{}, win);
+}
+} // namespace
+
+CLInstanceNormalizationLayerKernel::CLInstanceNormalizationLayerKernel()
+    : _input(nullptr), _output(nullptr), _gamma(1.f), _beta(0.f), _epsilon(1e-12f), _run_in_place(false)
+{
+}
+
+void CLInstanceNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *output, float gamma, float beta, float epsilon)
+{
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+
+    _input   = input;
+    _output  = output == nullptr ? input : output;
+    _gamma   = gamma;
+    _beta    = beta;
+    _epsilon = epsilon;
+
+    _run_in_place = (output == nullptr) || (output == input);
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(_input->info(), _output->info(), gamma, beta, epsilon));
+    const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
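+    // Data is processed in 16-byte vectors, i.e. VEC_SIZE is 4 for F32 and 8 for F16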
+
+    CLBuildOptions build_opts;
+    build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+    build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+    build_opts.add_option("-DDIM_X=" + support::cpp11::to_string(input->info()->dimension(0)));
+    build_opts.add_option("-DDIM_Y=" + support::cpp11::to_string(input->info()->dimension(1)));
+    build_opts.add_option("-DDIM_Z=" + support::cpp11::to_string(input->info()->dimension(2)));
+    build_opts.add_option("-DGAMMA=" + float_to_string_with_full_precision(gamma));
+    build_opts.add_option("-DBETA=" + float_to_string_with_full_precision(beta));
+    build_opts.add_option("-DEPSILON=" + float_to_string_with_full_precision(epsilon));
+    build_opts.add_option_if(_run_in_place, "-DIN_PLACE");
+    build_opts.add_option_if(_input->info()->data_layout() == DataLayout::NHWC, "-DNHWC");
+
+    // Create kernel
+    _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("instance_normalization", build_opts.options()));
+
+    // Configure kernel window
+    auto win_config = validate_and_configure_window(_input->info(), _output->info());
+    ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
+    ICLKernel::configure_internal(std::get<1>(win_config));
+}
+
+Status CLInstanceNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, float gamma, float beta, float epsilon)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, gamma, beta, epsilon));
+    ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), (output == nullptr ? input->clone().get() : output->clone().get()))));
+    return Status{};
+}
+
+void CLInstanceNormalizationLayerKernel::run(const Window &window, cl::CommandQueue &queue)
+{
+    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+    Window collapsed_window = window.collapse(window, Window::DimZ);
+
+    // We will process the planes together
+    if(_input->info()->data_layout() == DataLayout::NCHW)
+    {
+        collapsed_window.set(Window::DimX, Window::Dimension(0, 1, 1));
+        collapsed_window.set(Window::DimY, Window::Dimension(0, 1, 1));
+    }
+    else
+    {
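+        // NHWC: one work-item per channel (window dim X) and per batch (dim Z spans dimension 3);
+        // the kernel loops over the W/H spatial plane internally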
+        collapsed_window.set(Window::DimY, Window::Dimension(0, 1, 1));
+        collapsed_window.set(Window::DimZ, Window::Dimension(0, _input->info()->dimension(3), 1));
+    }
+
+    unsigned int idx = 0;
+    add_4D_tensor_argument(idx, _input, collapsed_window);
+    if(!_run_in_place)
+    {
+        add_4D_tensor_argument(idx, _output, collapsed_window);
+    }
+
+    enqueue(queue, *this, collapsed_window, lws_hint());
+}
+} // namespace arm_compute
\ No newline at end of file
diff --git a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
new file mode 100644
index 0000000..2b0987f
--- /dev/null
+++ b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h"
+
+#include "arm_compute/core/CL/kernels/CLInstanceNormalizationLayerKernel.h"
+#include "arm_compute/core/Types.h"
+
+namespace arm_compute
+{
+CLInstanceNormalizationLayer::CLInstanceNormalizationLayer()
+{
+}
+
+void CLInstanceNormalizationLayer::configure(ICLTensor *input, ICLTensor *output, float gamma, float beta, float epsilon)
+{
+    auto k = arm_compute::support::cpp14::make_unique<CLInstanceNormalizationLayerKernel>();
+    k->configure(input, output, gamma, beta, epsilon);
+    _kernel = std::move(k);
+}
+
+Status CLInstanceNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, float gamma, float beta, float epsilon)
+{
+    return CLInstanceNormalizationLayerKernel::validate(input, output, gamma, beta, epsilon);
+}
+} // namespace arm_compute
\ No newline at end of file
diff --git a/tests/validation/CL/InstanceNormalizationLayer.cpp b/tests/validation/CL/InstanceNormalizationLayer.cpp
new file mode 100644
index 0000000..165ab1f
--- /dev/null
+++ b/tests/validation/CL/InstanceNormalizationLayer.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLTensor.h"
+#include "arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h"
+#include "arm_compute/runtime/TensorAllocator.h"
+#include "tests/CL/CLAccessor.h"
+#include "tests/PaddingCalculator.h"
+#include "tests/datasets/ShapeDatasets.h"
+#include "tests/framework/Asserts.h"
+#include "tests/framework/Macros.h"
+#include "tests/framework/datasets/Datasets.h"
+#include "tests/validation/Validation.h"
+#include "tests/validation/fixtures/InstanceNormalizationLayerFixture.h"
+
+namespace arm_compute
+{
+namespace test
+{
+namespace validation
+{
+namespace
+{
+/** Tolerance for float operations */
+AbsoluteTolerance<float> tolerance_f32(0.001f);
+AbsoluteTolerance<float> tolerance_f16(2.f);
+} // namespace
+
+TEST_SUITE(CL)
+TEST_SUITE(InstanceNormalizationLayer)
+
+// *INDENT-OFF*
+// clang-format off
+DATA_TEST_CASE(Validate, framework::DatasetMode::ALL, zip(zip(
+    framework::dataset::make("InputInfo",  { TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching data type input/output
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32), // Mismatching shape input/output
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 2, DataType::F32), // Number of input channels != 1
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16), // Unsupported data type (not F16/F32)
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32)
+                                           }),
+    framework::dataset::make("OutputInfo", { TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F16),
+                                             TensorInfo(TensorShape(256U, 64U, 32U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::S16),
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32),
+                                             TensorInfo(TensorShape(128U, 64U, 32U, 4U), 1, DataType::F32)
+                                           })),
+    framework::dataset::make("Expected",   { false, false, false, false, true, true, true, true })),
+    input_info, output_info, expected)
+{
+    bool is_valid = bool(CLInstanceNormalizationLayer::validate(&input_info.clone()->set_is_resizable(false),
+                                                      &output_info.clone()->set_is_resizable(false)
+                                                      ));
+    ARM_COMPUTE_EXPECT(is_valid == expected, framework::LogLevel::ERRORS);
+}
+// clang-format on
+// *INDENT-ON*
+
+template <typename T>
+using CLInstanceNormalizationLayerFixture = InstanceNormalizationLayerValidationFixture<CLTensor, CLAccessor, CLInstanceNormalizationLayer, T>;
+
+TEST_SUITE(FP32)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLInstanceNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(datasets::Small4DShapes(),
+                                               framework::dataset::make("DataType", DataType::F32)),
+                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                               framework::dataset::make("InPlace", { false, true })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLInstanceNormalizationLayerFixture<float>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(datasets::Large4DShapes(),
+                                               framework::dataset::make("DataType", DataType::F32)),
+                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                               framework::dataset::make("InPlace", { false, true })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f32);
+}
+TEST_SUITE_END() // FP32
+TEST_SUITE(FP16)
+FIXTURE_DATA_TEST_CASE(RunSmall, CLInstanceNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(datasets::SmallShapes(),
+                                               framework::dataset::make("DataType", DataType::F16)),
+                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                               framework::dataset::make("InPlace", { false, true })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+
+FIXTURE_DATA_TEST_CASE(RunLarge, CLInstanceNormalizationLayerFixture<half>, framework::DatasetMode::NIGHTLY,
+                       combine(combine(combine(datasets::LargeShapes(),
+                                               framework::dataset::make("DataType", DataType::F16)),
+                                       framework::dataset::make("DataLayout", { DataLayout::NCHW, DataLayout::NHWC })),
+                               framework::dataset::make("InPlace", { false, true })))
+{
+    // Validate output
+    validate(CLAccessor(_target), _reference, tolerance_f16);
+}
+TEST_SUITE_END() // FP16
+TEST_SUITE_END() // InstanceNormalizationLayer
+TEST_SUITE_END() // CL
+} // namespace validation
+} // namespace test
+} // namespace arm_compute