COMPMID-987: Make beta and gamma optional in BatchNormalization

Currently beta and gamma are compulsory in batch normalization, but some
networks do not need one or both of them. They are now optional, with
beta (offset) defaulting to 0 and gamma (scale) to 1. This also reduces
memory requirements.
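
A minimal sketch of the resulting call sites (tensor names are illustrative;
only the signatures introduced by this patch are assumed):

    CLBatchNormalizationLayer bn;
    // Before: beta and gamma had to be allocated and filled even when unused
    bn.configure(&input, &output, &mean, &var, &beta, &gamma, 0.001f);
    // After: both trailing tensors default to nullptr, i.e. beta = 0, gamma = 1
    bn.configure(&input, &output, &mean, &var);

For a layer with FM feature maps this drops two FM-element vectors
(e.g. 2 * 256 * 4 B = 2 KiB per layer for F32) on top of the skipped
multiply/add inside the kernel.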

Change-Id: I15bf1ec14b814be2acebf1be1a4fba9c4fbd3190
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/123237
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Anthony Barbier <anthony.barbier@arm.com>
diff --git a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
index dbb25dd..8015f08 100644
--- a/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/CL/kernels/CLBatchNormalizationLayerKernel.h
@@ -58,12 +58,12 @@
      * @param[out]     output   Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      * @param[in]      mean     Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in]      var      Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      beta     Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      gamma    Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      epsilon  Small value to avoid division with zero.
+     * @param[in]      beta     (Optional) Beta values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
+     * @param[in]      gamma    (Optional) Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
+     * @param[in]      epsilon  (Optional) Small value to avoid division with zero. Default value is 0.001f.
      * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
      */
-    void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon,
+    void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta = nullptr, const ICLTensor *gamma = nullptr, float epsilon = 0.001f,
                    ActivationLayerInfo act_info = ActivationLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref CLBatchNormalizationLayerKernel
      *
@@ -73,17 +73,17 @@
      * @param[in] output   Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
      * @param[in] mean     Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in] var      Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] beta     Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] gamma    Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] epsilon  Small value to avoid division with zero.
+     * @param[in] beta     (Optional) Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
+     * @param[in] gamma    (Optional) Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
+     * @param[in] epsilon  (Optional) Small value to avoid division with zero. Default value is 0.001f.
      * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *output,
                            const ITensorInfo *mean, const ITensorInfo *var,
-                           const ITensorInfo *beta, const ITensorInfo *gamma,
-                           float epsilon, ActivationLayerInfo act_info);
+                           const ITensorInfo *beta = nullptr, const ITensorInfo *gamma = nullptr,
+                           float epsilon = 0.001f, ActivationLayerInfo act_info = ActivationLayerInfo());
 
     // Inherited methods overridden:
     void run(const Window &window, cl::CommandQueue &queue) override;
diff --git a/arm_compute/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.h b/arm_compute/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.h
index 754268a..bf971a2 100644
--- a/arm_compute/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.h
@@ -55,13 +55,32 @@
      * @param[out] output   Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      * @param[in]  mean     Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in]  var      Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  beta     Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  gamma    Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]  epsilon  Small value to avoid division with zero.
+     * @param[in]  beta     (Optional) Beta values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
+     * @param[in]  gamma    (Optional) Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
+     * @param[in]  epsilon  (Optional) Small value to avoid division with zero. Default value is 0.001f.
      * @param[in]  act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
      */
-    void configure(const IGCTensor *input, IGCTensor *output, const IGCTensor *mean, const IGCTensor *var, const IGCTensor *beta, const IGCTensor *gamma, float epsilon,
+    void configure(const IGCTensor *input, IGCTensor *output, const IGCTensor *mean, const IGCTensor *var, const IGCTensor *beta = nullptr, const IGCTensor *gamma = nullptr, float epsilon = 0.001f,
                    ActivationLayerInfo act_info = ActivationLayerInfo());
+    /** Static function to check if given info will lead to a valid configuration of @ref GCBatchNormalizationLayerKernel
+     *
+     * @param[in] input    Source tensor info.
+     *                     3 lower dimensions represent a single input with dimensions [width, height, FM].
+     *                     The rest are optional and used for representing batches. Data types supported: F16/F32.
+     * @param[in] output   Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
+     * @param[in] mean     Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] var      Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
+     * @param[in] beta     (Optional) Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
+     * @param[in] gamma    (Optional) Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
+     * @param[in] epsilon  (Optional) Small value to avoid division with zero. Default value is 0.001f.
+     * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
+     *
+     * @return a status
+     */
+    static Status validate(const ITensorInfo *input, const ITensorInfo *output,
+                           const ITensorInfo *mean, const ITensorInfo *var,
+                           const ITensorInfo *beta = nullptr, const ITensorInfo *gamma = nullptr,
+                           float epsilon = 0.001f, ActivationLayerInfo act_info = ActivationLayerInfo());
 
     // Inherited methods overridden:
     void run(const Window &window) override;
diff --git a/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h b/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
index 2408a66..ae6b863 100644
--- a/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
+++ b/arm_compute/core/NEON/kernels/NEBatchNormalizationLayerKernel.h
@@ -61,13 +61,12 @@
      * @param[out]     output   Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      * @param[in]      mean     Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in]      var      Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      beta     Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      gamma    Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      epsilon  Small value to avoid division with zero.
+     * @param[in]      beta     (Optional) Beta values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
+     * @param[in]      gamma    (Optional) Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
+     * @param[in]      epsilon  (Optional) Small value to avoid division with zero. Default value is 0.001f.
      * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
-     *                          Data types supported: F32
      */
-    void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon,
+    void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta = nullptr, const ITensor *gamma = nullptr, float epsilon = 0.001f,
                    ActivationLayerInfo act_info = ActivationLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEBatchNormalizationLayerKernel
      *
@@ -77,18 +76,17 @@
      * @param[in] output   Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
      * @param[in] mean     Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in] var      Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] beta     Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] gamma    Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] epsilon  Small value to avoid division with zero.
+     * @param[in] beta     (Optional) Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
+     * @param[in] gamma    (Optional) Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
+     * @param[in] epsilon  (Optional) Small value to avoid division with zero. Default value is 0.001f.
      * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
-     *                     Data types supported: F32
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *output,
                            const ITensorInfo *mean, const ITensorInfo *var,
-                           const ITensorInfo *beta, const ITensorInfo *gamma,
-                           float epsilon, ActivationLayerInfo act_info);
+                           const ITensorInfo *beta = nullptr, const ITensorInfo *gamma = nullptr,
+                           float epsilon = 0.001f, ActivationLayerInfo act_info = ActivationLayerInfo());
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
diff --git a/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h b/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
index 39f567d..9386a86 100644
--- a/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
+++ b/arm_compute/runtime/CL/functions/CLBatchNormalizationLayer.h
@@ -54,12 +54,12 @@
      * @param[out]     output   Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      * @param[in]      mean     Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in]      var      Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      beta     Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      gamma    Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      epsilon  Small value to avoid division with zero.
+     * @param[in]      beta     (Optional) Beta values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
+     * @param[in]      gamma    (Optional) Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
+     * @param[in]      epsilon  (Optional) Small value to avoid division with zero. Default value is 0.001f.
      * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
      */
-    void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma, float epsilon,
+    void configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta = nullptr, const ICLTensor *gamma = nullptr, float epsilon = 0.001f,
                    ActivationLayerInfo act_info = ActivationLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref CLBatchNormalizationLayer
      *
@@ -69,17 +69,17 @@
      * @param[in] output   Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
      * @param[in] mean     Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in] var      Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] beta     Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] gamma    Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] epsilon  Small value to avoid division with zero.
+     * @param[in] beta     (Optional) Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
+     * @param[in] gamma    (Optional) Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
+     * @param[in] epsilon  (Optional) Small value to avoid division with zero. Default value is 0.001f.
      * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *output,
                            const ITensorInfo *mean, const ITensorInfo *var,
-                           const ITensorInfo *beta, const ITensorInfo *gamma,
-                           float epsilon, ActivationLayerInfo act_info);
+                           const ITensorInfo *beta = nullptr, const ITensorInfo *gamma = nullptr,
+                           float epsilon = 0.001f, ActivationLayerInfo act_info = ActivationLayerInfo());
 
     // Inherited methods overridden:
     void run() override;
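
Because the same defaults apply to validate(), support can be queried up front
without creating beta or gamma objects at all. A hedged sketch against the
declaration above (error inspection via the existing Status accessors):

    Status s = CLBatchNormalizationLayer::validate(input.info(), output.info(),
                                                   mean.info(), var.info()); // beta/gamma omitted
    if(s.error_code() != ErrorCode::OK)
    {
        std::cerr << s.error_description() << std::endl;
    }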
diff --git a/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h b/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
index 85c6266..feb2087 100644
--- a/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
+++ b/arm_compute/runtime/NEON/functions/NEBatchNormalizationLayer.h
@@ -54,13 +54,12 @@
      * @param[out]     output   Destination tensor. Output will have the same number of dimensions as input. Data type supported: same as @p input
      * @param[in]      mean     Mean values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in]      var      Variance values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      beta     Beta values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      gamma    Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in]      epsilon  Small value to avoid division with zero.
+     * @param[in]      beta     (Optional) Beta values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
+     * @param[in]      gamma    (Optional) Gamma values tensor. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
+     * @param[in]      epsilon  (Optional) Small value to avoid division with zero. Default value is 0.001f.
      * @param[in]      act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
-     *                          Data types supported: F32
      */
-    void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta, const ITensor *gamma, float epsilon,
+    void configure(ITensor *input, ITensor *output, const ITensor *mean, const ITensor *var, const ITensor *beta = nullptr, const ITensor *gamma = nullptr, float epsilon = 0.001f,
                    ActivationLayerInfo act_info = ActivationLayerInfo());
     /** Static function to check if given info will lead to a valid configuration of @ref NEBatchNormalizationLayer
      *
@@ -70,18 +69,17 @@
      * @param[in] output   Destination tensor info. Output will have the same number of dimensions as input. Data type supported: same as @p input
      * @param[in] mean     Mean values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
      * @param[in] var      Variance values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] beta     Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] gamma    Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. Data types supported: Same as @p input
-     * @param[in] epsilon  Small value to avoid division with zero.
+     * @param[in] beta     (Optional) Beta values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for beta is 0. Data types supported: Same as @p input
+     * @param[in] gamma    (Optional) Gamma values tensor info. 1 dimension with size equal to the feature maps [FM]. If not provided, default value for gamma is 1. Data types supported: Same as @p input
+     * @param[in] epsilon  (Optional) Small value to avoid division with zero. Default value is 0.001f.
      * @param[in] act_info (Optional) Activation layer information in case of a fused activation. Only RELU, BOUNDED_RELU and LU_BOUNDED_RELU supported.
-     *                     Data types supported: F32
      *
      * @return a status
      */
     static Status validate(const ITensorInfo *input, const ITensorInfo *output,
                            const ITensorInfo *mean, const ITensorInfo *var,
-                           const ITensorInfo *beta, const ITensorInfo *gamma,
-                           float epsilon, ActivationLayerInfo act_info);
+                           const ITensorInfo *beta = nullptr, const ITensorInfo *gamma = nullptr,
+                           float epsilon = 0.001f, ActivationLayerInfo act_info = ActivationLayerInfo());
 
     // Inherited methods overridden:
     void run() override;
diff --git a/src/core/CL/cl_kernels/batchnormalization_layer.cl b/src/core/CL/cl_kernels/batchnormalization_layer.cl
index 0b61b56..29b62d3 100644
--- a/src/core/CL/cl_kernels/batchnormalization_layer.cl
+++ b/src/core/CL/cl_kernels/batchnormalization_layer.cl
@@ -93,8 +93,12 @@
 #endif /* not IN_PLACE */
                                        VECTOR_DECLARATION(mean),
                                        VECTOR_DECLARATION(var),
+#ifndef USE_DEFAULT_BETA
                                        VECTOR_DECLARATION(beta),
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
                                        VECTOR_DECLARATION(gamma),
+#endif /* USE_DEFAULT_GAMMA */
                                        float epsilon)
 {
     Tensor3D in = CONVERT_TO_TENSOR3D_STRUCT(input);
@@ -103,10 +107,14 @@
 #else  /* IN_PLACE */
     Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT(output);
 #endif /* IN_PLACE */
-    Vector mean  = CONVERT_TO_VECTOR_STRUCT(mean);
-    Vector var   = CONVERT_TO_VECTOR_STRUCT(var);
-    Vector beta  = CONVERT_TO_VECTOR_STRUCT(beta);
+    Vector mean = CONVERT_TO_VECTOR_STRUCT(mean);
+    Vector var  = CONVERT_TO_VECTOR_STRUCT(var);
+#ifndef USE_DEFAULT_BETA
+    Vector beta = CONVERT_TO_VECTOR_STRUCT(beta);
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
     Vector gamma = CONVERT_TO_VECTOR_STRUCT(gamma);
+#endif /* USE_DEFAULT_GAMMA */
 
     VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
     data = 0;
@@ -117,9 +125,7 @@
     VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
     x_bar = 0;
     VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-    gamma_vec = 0;
-    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-    beta_vec = 0;
+    res = 0;
 
     const int current_slice = get_global_id(2);
 
@@ -132,11 +138,22 @@
     numerator = SUB_OP(data, numerator);
     x_bar     = MUL_OP(numerator, denominator);
 
-    gamma_vec = *((__global DATA_TYPE *)(gamma.ptr + current_slice * gamma.stride_x));
-    beta_vec  = *((__global DATA_TYPE *)(beta.ptr + current_slice * beta.stride_x));
-
+#ifndef USE_DEFAULT_GAMMA
     VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
-    res = ADD_OP(MUL_OP(gamma_vec, x_bar), beta_vec);
+    gamma_vec = *((__global DATA_TYPE *)(gamma.ptr + current_slice * gamma.stride_x));
+
+    res = MUL_OP(gamma_vec, x_bar);
+#else  /* USE_DEFAULT_GAMMA */
+    // gamma is equal to 1, no need to perform multiplications
+    res = x_bar;
+#endif /* USE_DEFAULT_GAMMA */
+
+#ifndef USE_DEFAULT_BETA
+    VEC_DATA_TYPE(DATA_TYPE, VEC_SIZE)
+    beta_vec = *((__global DATA_TYPE *)(beta.ptr + current_slice * beta.stride_x));
+    // beta is provided, so perform the addition
+    res = ADD_OP(res, beta_vec);
+#endif /* USE_DEFAULT_BETA */
 
     res = ACTIVATION_FUNC(res);
 
@@ -144,4 +161,4 @@
     (res, 0, (__global DATA_TYPE *)out.ptr);
 }
 
-#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) */
\ No newline at end of file
+#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) */
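
The net effect of the two new defines is easiest to see as scalar code; a
hedged model of what one work-item computes (illustrative only, not library
code):

    #include <cmath>

    float batch_norm_element(float x, float mean, float var, float epsilon,
                             const float *beta,   // nullptr => 0 (USE_DEFAULT_BETA)
                             const float *gamma)  // nullptr => 1 (USE_DEFAULT_GAMMA)
    {
        const float x_bar = (x - mean) / std::sqrt(var + epsilon);
        float res = (gamma != nullptr) ? *gamma * x_bar : x_bar; // MUL_OP compiled out
        if(beta != nullptr)
        {
            res += *beta; // ADD_OP compiled out when beta is absent
        }
        return res;
    }

Since the selection happens at compile time via -DUSE_DEFAULT_BETA and
-DUSE_DEFAULT_GAMMA, the generated kernel carries no per-element branches.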
diff --git a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
index 95c8250..62f21ee 100644
--- a/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLBatchNormalizationLayerKernel.cpp
@@ -46,9 +46,22 @@
 {
     ARM_COMPUTE_UNUSED(epsilon);
     ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QS8, DataType::QS16, DataType::F16, DataType::F32);
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var, beta, gamma);
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var, beta, gamma);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var);
+    if(beta != nullptr)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, beta);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, beta);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, beta);
+    }
+    if(gamma != nullptr)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, gamma);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, gamma);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, gamma);
+    }
+
     ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != mean->dimension(0));
     if(act_info.enabled())
     {
@@ -108,7 +121,7 @@
 void CLBatchNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *output, const ICLTensor *mean, const ICLTensor *var, const ICLTensor *beta, const ICLTensor *gamma,
                                                 float epsilon, ActivationLayerInfo act_info)
 {
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var, beta, gamma);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var);
 
     _input   = input;
     _output  = output;
@@ -120,15 +133,9 @@
 
     _run_in_place = (output == nullptr) || (output == input);
 
-    if(output != nullptr)
-    {
-        ARM_COMPUTE_ERROR_ON_NULLPTR(input->info(), output->info());
-        // Output tensor auto initialization if not yet initialized
-        auto_init_if_empty(*output->info(), *input->info()->clone());
-    }
-
     ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr,
-                                                  mean->info(), var->info(), beta->info(), gamma->info(), epsilon, act_info));
+                                                  mean->info(), var->info(), (beta != nullptr) ? beta->info() : nullptr,
+                                                  (gamma != nullptr) ? gamma->info() : nullptr, epsilon, act_info));
 
     const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
 
@@ -141,13 +148,23 @@
     build_opts.add_option_if(act_info.enabled(), "-DB_VAL=" + float_to_string_with_full_precision(act_info.b()));
     build_opts.add_option_if(_run_in_place, "-DIN_PLACE");
     build_opts.add_option_if(is_data_type_fixed_point(input->info()->data_type()), "-DFIXED_POINT_POSITION=" + support::cpp11::to_string(input->info()->fixed_point_position()));
+    build_opts.add_option_if(beta == nullptr, "-DUSE_DEFAULT_BETA");
+    build_opts.add_option_if(gamma == nullptr, "-DUSE_DEFAULT_GAMMA");
 
     // Create kernel
     _kernel = static_cast<cl::Kernel>(CLKernelLibrary::get().create_kernel("batchnormalization_layer", build_opts.options()));
 
     // Set kernel static arguments
     unsigned int include_output = (!_run_in_place) ? 1 : 0;
-    unsigned int idx            = (1 + include_output) * num_arguments_per_3D_tensor() + 4 * num_arguments_per_1D_tensor(); // Skip the input and output parameters
+    unsigned int idx            = (1 + include_output) * num_arguments_per_3D_tensor() + 2 * num_arguments_per_1D_tensor(); // Skip the input and output parameters
+    if(_beta != nullptr)
+    {
+        idx += num_arguments_per_1D_tensor(); // Skip beta parameter
+    }
+    if(_gamma != nullptr)
+    {
+        idx += num_arguments_per_1D_tensor(); // Skip gamma parameter
+    }
     _kernel.setArg<cl_float>(idx++, _epsilon);
 
     // Configure kernel window
@@ -191,8 +208,14 @@
     unsigned int idx            = (1 + include_output) * num_arguments_per_3D_tensor();
     add_1D_tensor_argument(idx, _mean, vector_slice);
     add_1D_tensor_argument(idx, _var, vector_slice);
-    add_1D_tensor_argument(idx, _beta, vector_slice);
-    add_1D_tensor_argument(idx, _gamma, vector_slice);
+    if(_beta != nullptr)
+    {
+        add_1D_tensor_argument(idx, _beta, vector_slice);
+    }
+    if(_gamma != nullptr)
+    {
+        add_1D_tensor_argument(idx, _gamma, vector_slice);
+    }
 
     do
     {
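
The index arithmetic above mirrors the conditional VECTOR_DECLARATIONs in the
.cl file: the kernel signature itself shrinks, so the static epsilon argument
moves forward. A hedged restatement of the packing contract:

    // Kernel arguments are packed densely, in declaration order:
    //   (1 + include_output) 3D tensors         -> input (+ output)
    //   2 x 1D tensors                          -> mean, var (always present)
    //   one 1D tensor per non-null beta / gamma -> matches the #ifndef guards
    // With both beta and gamma omitted, epsilon's index therefore drops by
    // 2 * num_arguments_per_1D_tensor() relative to the old kernel.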
diff --git a/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs b/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
index 7629b25..81be967 100644
--- a/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
+++ b/src/core/GLES_COMPUTE/cs_shaders/batchnormalization_layer.cs
@@ -50,6 +50,8 @@
  *
  * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
  * @note Epsilon parameter in the batch normalization equation should be given as a preprocessor argument using "#define EPSILON". e.g. "#define EPSILON 0.1"
+ * @note Beta is optional with default value of 0. If the beta tensor is not provided, the preprocessor argument "USE_DEFAULT_BETA" should be given
+ * @note Gamma is optional with default value of 1. If the gamma tensor is not provided, the preprocessor argument "USE_DEFAULT_GAMMA" should be given
  *
  * @param[in]  src_ptr     Pointer to the first source tensor. Supported data types: F16/F32
  * @param[in]  src_attrs   The attributes of the source tensor
@@ -59,10 +61,10 @@
  * @param[in]  mean_attrs  The attributes of the mean tensor
  * @param[in]  var_ptr     Pointer to the var tensor. Supported data types: same as @p src_ptr
  * @param[in]  var_attrs   The attributes of the var tensor
- * @param[in]  beta_ptr    Pointer to the beta source tensor. Supported data types: same as @p src_ptr
- * @param[in]  beta_attrs  The attributes of the beta tensor
- * @param[in]  gamma_ptr   Pointer to the gamma source tensor. Supported data types: same as @p src_ptr
- * @param[in]  gamma_attrs The attributes of the gamma tensor
+ * @param[in]  beta_ptr    (Optional) Pointer to the beta source tensor. If not provided, default value of beta is 0. Supported data types: same as @p src_ptr
+ * @param[in]  beta_attrs  (Optional) The attributes of the beta tensor
+ * @param[in]  gamma_ptr   (Optional) Pointer to the gamma source tensor. If not provided, default value of gamma is 1. Supported data types: same as @p src_ptr
+ * @param[in]  gamma_attrs (Optional) The attributes of the gamma tensor
  */
 SHADER_PARAMS_DECLARATION
 {
@@ -70,8 +72,12 @@
     Tensor3DAttributes dst_attrs;
     VectorAttributes   mean_attrs;
     VectorAttributes   var_attrs;
-    VectorAttributes   beta_attrs;
-    VectorAttributes   gamma_attrs;
+#ifndef USE_DEFAULT_BETA
+    VectorAttributes beta_attrs;
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+    VectorAttributes gamma_attrs;
+#endif /* USE_DEFAULT_GAMMA */
 };
 
 #ifdef DATA_TYPE_FP32
@@ -79,24 +85,34 @@
 TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);
 TENSOR_DECLARATION(3, meanBuffer, float, mean_ptr, mean_shift, 2, readonly);
 TENSOR_DECLARATION(4, varBuffer, float, var_ptr, var_shift, 2, readonly);
+#ifndef USE_DEFAULT_BETA
 TENSOR_DECLARATION(5, betaBuffer, float, beta_ptr, beta_shift, 2, readonly);
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+#ifdef USE_DEFAULT_BETA
+TENSOR_DECLARATION(5, gammaBuffer, float, gamma_ptr, gamma_shift, 2, readonly);
+#else  /* USE_DEFAULT_BETA */
 TENSOR_DECLARATION(6, gammaBuffer, float, gamma_ptr, gamma_shift, 2, readonly);
+#endif /* USE_DEFAULT_BETA */
+#endif /* USE_DEFAULT_GAMMA */
 
 void main(void)
 {
-    Tensor3DIterator src_iter   = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
-    Tensor3DIterator dst_iter   = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
-    VectorIterator   mean_iter  = CONVERT_TO_VECTOR_ITERATOR(mean_attrs, mean_shift);
-    VectorIterator   var_iter   = CONVERT_TO_VECTOR_ITERATOR(var_attrs, var_shift);
-    VectorIterator   beta_iter  = CONVERT_TO_VECTOR_ITERATOR(beta_attrs, beta_shift);
-    VectorIterator   gamma_iter = CONVERT_TO_VECTOR_ITERATOR(gamma_attrs, gamma_shift);
+    Tensor3DIterator src_iter  = CONVERT_TO_TENSOR3D_ITERATOR(src_attrs, src_shift);
+    Tensor3DIterator dst_iter  = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
+    VectorIterator   mean_iter = CONVERT_TO_VECTOR_ITERATOR(mean_attrs, mean_shift);
+    VectorIterator   var_iter  = CONVERT_TO_VECTOR_ITERATOR(var_attrs, var_shift);
+#ifndef USE_DEFAULT_BETA
+    VectorIterator beta_iter = CONVERT_TO_VECTOR_ITERATOR(beta_attrs, beta_shift);
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+    VectorIterator gamma_iter = CONVERT_TO_VECTOR_ITERATOR(gamma_attrs, gamma_shift);
+#endif /* USE_DEFAULT_GAMMA */
 
     float input_value = 0.f;
     float denominator = 0.f;
     float numerator   = 0.f;
     float x_bar       = 0.f;
-    float gamma_param = 0.f;
-    float beta_param  = 0.f;
 
     uint current_slice = gl_GlobalInvocationID.z;
 
@@ -109,10 +125,18 @@
     numerator = SUB_OP(input_value, numerator);
     x_bar     = MUL_OP(numerator, denominator);
 
-    gamma_param = LOAD(gamma_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(gamma_iter, current_slice * beta_attrs.stride_x));
-    beta_param  = LOAD(beta_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(beta_iter, current_slice * beta_attrs.stride_x));
+#ifndef USE_DEFAULT_GAMMA
+    float gamma_param = LOAD(gamma_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(gamma_iter, current_slice * gamma_attrs.stride_x));
 
-    STORE_CURRENT_ITEM(dst_ptr, dst_iter, ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param)));
+    x_bar = MUL_OP(gamma_param, x_bar);
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
+    float beta_param = LOAD(beta_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(beta_iter, current_slice * beta_attrs.stride_x));
+
+    x_bar = ADD_OP(x_bar, beta_param);
+#endif /* USE_DEFAULT_BETA */
+
+    STORE_CURRENT_ITEM(dst_ptr, dst_iter, ACTIVATION_FUNC(x_bar));
 }
 
 #elif defined(DATA_TYPE_FP16)
@@ -120,8 +144,16 @@
 TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);
 TENSOR_DECLARATION(3, meanBuffer, uvec2, mean_ptr, mean_shift, 3, readonly);
 TENSOR_DECLARATION(4, varBuffer, uvec2, var_ptr, var_shift, 3, readonly);
+#ifndef USE_DEFAULT_BETA
 TENSOR_DECLARATION(5, betaBuffer, uvec2, beta_ptr, beta_shift, 3, readonly);
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
+#ifdef USE_DEFAULT_BETA
+TENSOR_DECLARATION(5, gammaBuffer, uvec2, gamma_ptr, gamma_shift, 3, readonly);
+#else  /* USE_DEFAULT_BETA */
 TENSOR_DECLARATION(6, gammaBuffer, uvec2, gamma_ptr, gamma_shift, 3, readonly);
+#endif /* USE_DEFAULT_BETA */
+#endif /* USE_DEFAULT_GAMMA */
 
 void main(void)
 {
@@ -129,14 +161,18 @@
     Tensor3DIterator dst_iter   = CONVERT_TO_TENSOR3D_ITERATOR(dst_attrs, dst_shift);
     VectorIterator   mean_iter  = CONVERT_TO_VECTOR_ITERATOR(mean_attrs, mean_shift);
     VectorIterator   var_iter   = CONVERT_TO_VECTOR_ITERATOR(var_attrs, var_shift);
+#ifndef USE_DEFAULT_BETA
     VectorIterator   beta_iter  = CONVERT_TO_VECTOR_ITERATOR(beta_attrs, beta_shift);
+#endif /* USE_DEFAULT_BETA */
+#ifndef USE_DEFAULT_GAMMA
     VectorIterator   gamma_iter = CONVERT_TO_VECTOR_ITERATOR(gamma_attrs, gamma_shift);
+#endif /* USE_DEFAULT_GAMMA */
 
     vec4  unpacked_s[5];
     float denominator;
     float numerator;
-    float gamma_param;
-    float beta_param;
+    float gamma_param = 1.f;
+    float beta_param  = 0.f;
     vec4  x_bar;
     vec4  result;
 
@@ -144,68 +180,87 @@
     unpacked_s[0]      = LOAD_UNPACK4_CURRENT_ITEM_HALF(src_ptr, src_iter);
     unpacked_s[1]      = LOAD_UNPACK4_HALF(var_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(var_iter, current_slice * var_attrs.stride_x));
     unpacked_s[2]      = LOAD_UNPACK4_HALF(mean_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(mean_iter, current_slice * mean_attrs.stride_x));
-    unpacked_s[3]      = LOAD_UNPACK4_HALF(gamma_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(gamma_iter, current_slice * beta_attrs.stride_x));
+#ifndef USE_DEFAULT_GAMMA
+    unpacked_s[3]      = LOAD_UNPACK4_HALF(gamma_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(gamma_iter, current_slice * gamma_attrs.stride_x));
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
     unpacked_s[4]      = LOAD_UNPACK4_HALF(beta_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(beta_iter, current_slice * beta_attrs.stride_x));
+#endif /* USE_DEFAULT_BETA */
 
     if((current_slice % uint(4)) == uint(0))
     {
         denominator = unpacked_s[1].x;
         denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
 
-        //Calculate x bar and store results
-        numerator = unpacked_s[2].x;
-        x_bar     = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
+        // Calculate x bar
+        numerator   = unpacked_s[2].x;
+        x_bar       = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
 
+#ifndef USE_DEFAULT_GAMMA
         gamma_param = unpacked_s[3].x;
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
         beta_param  = unpacked_s[4].x;
-        result      = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
-
-        STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
+#endif /* USE_DEFAULT_BETA */
     }
     else if((current_slice % uint(4)) == uint(1))
     {
         denominator = unpacked_s[1].y;
         denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
 
-        //Calculate x bar and store results
-        numerator = unpacked_s[2].y;
-        x_bar     = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
+        // Calculate x bar
+        numerator   = unpacked_s[2].y;
+        x_bar       = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
 
+#ifndef USE_DEFAULT_GAMMA
         gamma_param = unpacked_s[3].y;
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
         beta_param  = unpacked_s[4].y;
-        result      = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
-
-        STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
+#endif /* USE_DEFAULT_BETA */
     }
     else if((current_slice % uint(4)) == uint(2))
     {
         denominator = unpacked_s[1].z;
         denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
 
-        //Calculate x bar and store results
-        numerator = unpacked_s[2].z;
-        x_bar     = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
+        // Calculate x bar
+        numerator   = unpacked_s[2].z;
+        x_bar       = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
 
+#ifndef USE_DEFAULT_GAMMA
         gamma_param = unpacked_s[3].z;
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
         beta_param  = unpacked_s[4].z;
-        result      = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
-
-        STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
+#endif /* USE_DEFAULT_BETA */
     }
     else
     {
         denominator = unpacked_s[1].w;
         denominator = INVSQRT_OP(ADD_OP(denominator, SQCVT_SAT(float(ESPILON))));
 
-        //Calculate x bar and store results
-        numerator = unpacked_s[2].w;
-        x_bar     = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
+        // Calculate x bar
+        numerator   = unpacked_s[2].w;
+        x_bar       = MUL_OP(SUB_OP(unpacked_s[0], numerator), denominator);
 
+#ifndef USE_DEFAULT_GAMMA
         gamma_param = unpacked_s[3].w;
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
         beta_param  = unpacked_s[4].w;
-        result      = ACTIVATION_FUNC(ADD_OP(MUL_OP(gamma_param, x_bar), beta_param));
-
-        STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
+#endif /* USE_DEFAULT_BETA */
     }
+
+#ifndef USE_DEFAULT_GAMMA
+    x_bar = MUL_OP(gamma_param, x_bar);
+#endif /* USE_DEFAULT_GAMMA */
+#ifndef USE_DEFAULT_BETA
+    x_bar = ADD_OP(x_bar, beta_param);
+#endif /* USE_DEFAULT_BETA */
+
+    result = ACTIVATION_FUNC(x_bar);
+
+    STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, result);
 }
 #endif /*DATA_TYPE_FP16*/
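
One subtlety: GLES binding points must be assigned densely, so when
USE_DEFAULT_BETA is defined but gamma is present, gammaBuffer takes binding 5
instead of 6 (hence the nested #ifdef above). A hedged sketch of the matching
host-side bookkeeping (mirroring GCBatchNormalizationLayerKernel::run() below):

    unsigned int binding_point = 3;      // mean: always binding 3
    ++binding_point;                     // var:  always binding 4
    if(beta != nullptr)
    {
        ++binding_point;                 // beta: 5 when present
    }
    if(gamma != nullptr)
    {
        ++binding_point;                 // gamma: 5 if beta absent, else 6
    }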
diff --git a/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp b/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp
index cd93f69..9a592df 100644
--- a/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp
+++ b/src/core/GLES_COMPUTE/kernels/GCBatchNormalizationLayerKernel.cpp
@@ -36,6 +36,105 @@
 
 using namespace arm_compute;
 
+namespace
+{
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
+                          const ITensorInfo *mean, const ITensorInfo *var,
+                          const ITensorInfo *beta, const ITensorInfo *gamma,
+                          float epsilon, ActivationLayerInfo act_info)
+{
+    ARM_COMPUTE_UNUSED(epsilon);
+    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
+
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var);
+
+    if(output->total_size() != 0)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
+    }
+
+    if(beta != nullptr)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, beta);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, beta);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, beta);
+    }
+    if(gamma != nullptr)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, gamma);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, gamma);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, gamma);
+    }
+    if(act_info.enabled())
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON(input->data_type() != DataType::F32 && input->data_type() != DataType::F16);
+        ARM_COMPUTE_RETURN_ERROR_ON(act_info.activation() != ActivationLayerInfo::ActivationFunction::RELU
+                                    && act_info.activation() != ActivationLayerInfo::ActivationFunction::BOUNDED_RELU
+                                    && act_info.activation() != ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
+        ARM_COMPUTE_RETURN_ERROR_ON(act_info.b() > act_info.a());
+    }
+    return Status{};
+}
+
+std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output,
+                                                        ITensorInfo *mean, ITensorInfo *var,
+                                                        ITensorInfo *beta, ITensorInfo *gamma)
+{
+    // Output tensor auto initialization if not yet initialized
+    auto_init_if_empty(*output, input->tensor_shape(), 1, input->data_type(), input->fixed_point_position());
+
+    unsigned int num_elems_processed_per_iteration = 1;
+    if(input->data_type() == DataType::F16)
+    {
+        num_elems_processed_per_iteration = 4;
+    }
+
+    // Configure kernel window
+    Window win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
+
+    AccessWindowHorizontal input_access(input, 0, num_elems_processed_per_iteration);
+    AccessWindowHorizontal output_access(output, 0, num_elems_processed_per_iteration);
+    AccessWindowStatic     mean_access(mean, 0, 0, mean->dimension(0) + 3, mean->dimension(1));
+    AccessWindowStatic     var_access(var, 0, 0, var->dimension(0) + 3, var->dimension(1));
+
+    bool window_changed = false;
+    if(beta != nullptr)
+    {
+        AccessWindowStatic beta_access(beta, 0, 0, beta->dimension(0) + 3, beta->dimension(1));
+        if(gamma != nullptr)
+        {
+            AccessWindowStatic gamma_access(gamma, 0, 0, gamma->dimension(0) + 3, gamma->dimension(1));
+            window_changed = update_window_and_padding(win, input_access, output_access, mean_access, var_access, beta_access, gamma_access);
+        }
+        else
+        {
+            window_changed = update_window_and_padding(win, input_access, output_access, mean_access, var_access, beta_access);
+        }
+    }
+    else
+    {
+        if(gamma != nullptr)
+        {
+            AccessWindowStatic gamma_access(gamma, 0, 0, gamma->dimension(0) + 3, gamma->dimension(1));
+            window_changed = update_window_and_padding(win, input_access, output_access, mean_access, var_access, gamma_access);
+        }
+        else
+        {
+            window_changed = update_window_and_padding(win, input_access, output_access, mean_access, var_access);
+        }
+    }
+    output_access.set_valid_region(win, input->valid_region());
+
+    Status err = (window_changed) ? ARM_COMPUTE_CREATE_ERROR(ErrorCode::RUNTIME_ERROR, "Insufficient Padding!") : Status{};
+    return std::make_pair(err, win);
+}
+} // namespace
+
 GCBatchNormalizationLayerKernel::GCBatchNormalizationLayerKernel()
     : _input(nullptr), _output(nullptr), _mean(nullptr), _var(nullptr), _beta(nullptr), _gamma(nullptr), _epsilon(0.0f)
 {
@@ -44,24 +143,11 @@
 void GCBatchNormalizationLayerKernel::configure(const IGCTensor *input, IGCTensor *output, const IGCTensor *mean, const IGCTensor *var, const IGCTensor *beta, const IGCTensor *gamma,
                                                 float epsilon, ActivationLayerInfo act_info)
 {
-    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
-    ARM_COMPUTE_ERROR_ON_NULLPTR(output);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output, mean, var);
 
-    // Output tensor auto initialization if not yet initialized
-    auto_init_if_empty(*output->info(), input->info()->tensor_shape(), 1, input->info()->data_type(), input->info()->fixed_point_position());
-
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input, output, mean, var, beta, gamma);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input, output, mean, var, beta, gamma);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(input, output);
-    ARM_COMPUTE_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
-    if(act_info.enabled())
-    {
-        ARM_COMPUTE_ERROR_ON(input->info()->data_type() != DataType::F32 && input->info()->data_type() != DataType::F16);
-        ARM_COMPUTE_ERROR_ON(act_info.activation() != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::RELU
-                             && act_info.activation() != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::BOUNDED_RELU
-                             && act_info.activation() != ActivationLayerInfo::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU);
-        ARM_COMPUTE_ERROR_ON(act_info.b() > act_info.a());
-    }
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output->info(), mean->info(), var->info(),
+                                                  (beta != nullptr) ? beta->info() : nullptr, (gamma != nullptr) ? gamma->info() : nullptr,
+                                                  epsilon, act_info));
 
     _input   = input;
     _output  = output;
@@ -71,12 +157,6 @@
     _gamma   = gamma;
     _epsilon = epsilon;
 
-    unsigned int num_elems_processed_per_iteration = 1;
-    if(input->info()->data_type() == DataType::F16)
-    {
-        num_elems_processed_per_iteration = 4;
-    }
-
     // Set build options
     std::set<std::string> build_opts;
     std::string           dt_name = (input->info()->data_type() == DataType::F32) ? "DATA_TYPE_FP32" : "DATA_TYPE_FP16";
@@ -85,6 +165,14 @@
     build_opts.emplace(("#define LOCAL_SIZE_X " + support::cpp11::to_string(1)));
     build_opts.emplace(("#define LOCAL_SIZE_Y " + support::cpp11::to_string(1)));
     build_opts.emplace(("#define LOCAL_SIZE_Z " + support::cpp11::to_string(1)));
+    if(beta == nullptr)
+    {
+        build_opts.emplace("#define USE_DEFAULT_BETA");
+    }
+    if(gamma == nullptr)
+    {
+        build_opts.emplace("#define USE_DEFAULT_GAMMA");
+    }
 
     if(act_info.enabled())
     {
@@ -97,19 +185,25 @@
     _kernel = static_cast<GCKernel>(GCKernelLibrary::get().create_kernel("batchnormalization_layer", build_opts));
 
     // Configure kernel window
-    Window win = calculate_max_window(*input->info(), Steps(num_elems_processed_per_iteration));
+    auto win_config = validate_and_configure_window(input->info(), output->info(), mean->info(), var->info(),
+                                                    (beta != nullptr) ? beta->info() : nullptr, (gamma != nullptr) ? gamma->info() : nullptr);
+    ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
 
-    AccessWindowHorizontal input_access(input->info(), 0, num_elems_processed_per_iteration);
-    AccessWindowHorizontal output_access(output->info(), 0, num_elems_processed_per_iteration);
-    AccessWindowStatic     mean_access(mean->info(), 0, 0, mean->info()->dimension(0) + 3, mean->info()->dimension(1));
-    AccessWindowStatic     var_access(var->info(), 0, 0, var->info()->dimension(0) + 3, var->info()->dimension(1));
-    AccessWindowStatic     beta_access(beta->info(), 0, 0, beta->info()->dimension(0) + 3, beta->info()->dimension(1));
-    AccessWindowStatic     gamma_access(gamma->info(), 0, 0, gamma->info()->dimension(0) + 3, gamma->info()->dimension(1));
+    IGCKernel::configure(win_config.second);
+}
 
-    update_window_and_padding(win, input_access, output_access, mean_access, var_access, beta_access, gamma_access);
-    output_access.set_valid_region(win, input->info()->valid_region());
+Status GCBatchNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output,
+                                                 const ITensorInfo *mean, const ITensorInfo *var,
+                                                 const ITensorInfo *beta, const ITensorInfo *gamma,
+                                                 float epsilon, ActivationLayerInfo act_info)
+{
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, mean, var, beta, gamma, epsilon, act_info));
+    ARM_COMPUTE_RETURN_ON_ERROR(validate_and_configure_window(input->clone().get(), output->clone().get(),
+                                                              mean->clone().get(), var->clone().get(),
+                                                              (beta != nullptr) ? beta->clone().get() : nullptr,
+                                                              (gamma != nullptr) ? gamma->clone().get() : nullptr)
+                                .first);
 
-    IGCKernel::configure(win);
+    return Status{};
 }
 
 void GCBatchNormalizationLayerKernel::run(const Window &window)
@@ -127,11 +221,18 @@
     Window vector_slice = window.first_slice_window_1D();
     vector_slice.set(Window::DimX, Window::Dimension(0, 0, 0));
 
-    unsigned int idx = 2 * num_arguments_per_3D_tensor();
-    add_1D_tensor_argument(idx, _mean, 3, vector_slice);
-    add_1D_tensor_argument(idx, _var, 4, vector_slice);
-    add_1D_tensor_argument(idx, _beta, 5, vector_slice);
-    add_1D_tensor_argument(idx, _gamma, 6, vector_slice);
+    unsigned int idx           = 2 * num_arguments_per_3D_tensor();
+    unsigned int binding_point = 3;
+    add_1D_tensor_argument(idx, _mean, binding_point, vector_slice);
+    add_1D_tensor_argument(idx, _var, ++binding_point, vector_slice);
+    if(_beta != nullptr)
+    {
+        add_1D_tensor_argument(idx, _beta, ++binding_point, vector_slice);
+    }
+    if(_gamma != nullptr)
+    {
+        add_1D_tensor_argument(idx, _gamma, ++binding_point, vector_slice);
+    }
 
     slice.shift(Window::DimX, -(_output->info()->padding()).left);
 
diff --git a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
index 1f730a2..d1bdfac 100644
--- a/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
+++ b/src/core/NEON/kernels/NEBatchNormalizationLayerKernel.cpp
@@ -62,9 +62,21 @@
         ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, output);
     }
 
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var, beta, gamma);
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var, beta, gamma);
-    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var, beta, gamma);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, mean, var);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, mean, var);
+    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, var);
+    if(beta != nullptr)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, beta);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, beta);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, beta);
+    }
+    if(gamma != nullptr)
+    {
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, gamma);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_FIXED_POINT(input, gamma);
+        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(mean, gamma);
+    }
     ARM_COMPUTE_RETURN_ERROR_ON(input->dimension(2) != mean->dimension(0));
 
     return Status{};
@@ -72,6 +84,12 @@
 
 std::pair<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
 {
+    if(output != nullptr)
+    {
+        // Output tensor auto initialization if not yet initialized
+        auto_init_if_empty(*output, *input->clone());
+    }
+
     unsigned int num_elems_processed_per_iteration = 16 / input->element_size();
 
     Window                 win = calculate_max_window(*input, Steps(num_elems_processed_per_iteration));
@@ -99,13 +117,13 @@
     const int  fixed_point_position = _input->info()->fixed_point_position();
     const auto input_mean           = reinterpret_cast<const qint8_t *>(_mean->ptr_to_element(Coordinates(0, 0)));
     const auto input_var            = reinterpret_cast<const qint8_t *>(_var->ptr_to_element(Coordinates(0, 0)));
-    const auto input_gamma          = reinterpret_cast<const qint8_t *>(_gamma->ptr_to_element(Coordinates(0, 0)));
-    const auto input_beta           = reinterpret_cast<const qint8_t *>(_beta->ptr_to_element(Coordinates(0, 0)));
+    const auto input_gamma          = (_gamma != nullptr) ? reinterpret_cast<const qint8_t *>(_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+    const auto input_beta           = (_beta != nullptr) ? reinterpret_cast<const qint8_t *>(_beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
 
     qint8x16_t       mean_vec    = vdupq_n_qs8(0);
     qint8x16_t       var_vec     = vdupq_n_qs8(0);
-    qint8x16_t       gamma_vec   = vdupq_n_qs8(0);
-    qint8x16_t       beta_vec    = vdupq_n_qs8(0);
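+    // Batch normalization computes out = gamma * (in - mean) / sqrt(var + epsilon) + beta,
+    // so the identity defaults gamma = 1 and beta = 0 apply when the tensors are absent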
+    qint8x16_t       gamma_vec   = vdupq_n_qs8(sqcvt_qs8_f32(1, fixed_point_position));
+    qint8x16_t       beta_vec    = vdupq_n_qs8(sqcvt_qs8_f32(0, fixed_point_position));
     qint8x16_t       denominator = vdupq_n_qs8(0);
     const qint8x16_t epsilon_vec = vdupq_n_qs8(sqcvt_qs8_f32(_epsilon, fixed_point_position));
     execute_window_loop(window, [&](const Coordinates & id)
@@ -113,10 +131,16 @@
         if(slice != id.z())
         {
             // Construct vectors
-            mean_vec  = vdupq_n_qs8(*(input_mean + id.z()));
-            var_vec   = vdupq_n_qs8(*(input_var + id.z()));
-            gamma_vec = vdupq_n_qs8(*(input_gamma + id.z()));
-            beta_vec  = vdupq_n_qs8(*(input_beta + id.z()));
+            mean_vec = vdupq_n_qs8(*(input_mean + id.z()));
+            var_vec  = vdupq_n_qs8(*(input_var + id.z()));
+            if(input_gamma != nullptr)
+            {
+                gamma_vec = vdupq_n_qs8(*(input_gamma + id.z()));
+            }
+            if(input_beta != nullptr)
+            {
+                beta_vec = vdupq_n_qs8(*(input_beta + id.z()));
+            }
 
             // Calculate denominator
             denominator = vqinvsqrtq_qs8(vqaddq_qs8(var_vec, epsilon_vec), fixed_point_position);
@@ -146,13 +170,13 @@
     const int  fixed_point_position = _input->info()->fixed_point_position();
     const auto input_mean           = reinterpret_cast<const qint16_t *>(_mean->ptr_to_element(Coordinates(0, 0)));
     const auto input_var            = reinterpret_cast<const qint16_t *>(_var->ptr_to_element(Coordinates(0, 0)));
-    const auto input_gamma          = reinterpret_cast<const qint16_t *>(_gamma->ptr_to_element(Coordinates(0, 0)));
-    const auto input_beta           = reinterpret_cast<const qint16_t *>(_beta->ptr_to_element(Coordinates(0, 0)));
+    const auto input_gamma          = (_gamma != nullptr) ? reinterpret_cast<const qint16_t *>(_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+    const auto input_beta           = (_beta != nullptr) ? reinterpret_cast<const qint16_t *>(_beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
 
     qint16x8_t       mean_vec    = vdupq_n_qs16(0);
     qint16x8_t       var_vec     = vdupq_n_qs16(0);
-    qint16x8_t       gamma_vec   = vdupq_n_qs16(0);
-    qint16x8_t       beta_vec    = vdupq_n_qs16(0);
+    qint16x8_t       gamma_vec   = vdupq_n_qs16(sqcvt_qs16_f32(1, fixed_point_position));
+    qint16x8_t       beta_vec    = vdupq_n_qs16(sqcvt_qs16_f32(0, fixed_point_position));
     qint16x8_t       denominator = vdupq_n_qs16(0);
     const qint16x8_t epsilon_vec = vdupq_n_qs16(sqcvt_qs16_f32(_epsilon, fixed_point_position));
     execute_window_loop(window, [&](const Coordinates & id)
@@ -160,10 +184,16 @@
         if(slice != id.z())
         {
             // Construct vectors
-            mean_vec  = vdupq_n_qs16(*(input_mean + id.z()));
-            var_vec   = vdupq_n_qs16(*(input_var + id.z()));
-            gamma_vec = vdupq_n_qs16(*(input_gamma + id.z()));
-            beta_vec  = vdupq_n_qs16(*(input_beta + id.z()));
+            mean_vec = vdupq_n_qs16(*(input_mean + id.z()));
+            var_vec  = vdupq_n_qs16(*(input_var + id.z()));
+            if(input_gamma != nullptr)
+            {
+                gamma_vec = vdupq_n_qs16(*(input_gamma + id.z()));
+            }
+            if(input_beta != nullptr)
+            {
+                beta_vec = vdupq_n_qs16(*(input_beta + id.z()));
+            }
 
             // Calculate denominator
             denominator = vqinvsqrtq_qs16(vqaddq_qs16(var_vec, epsilon_vec), fixed_point_position);
@@ -194,12 +224,12 @@
 
     const auto input_mean  = reinterpret_cast<const float16_t *>(_mean->ptr_to_element(Coordinates(0, 0)));
     const auto input_var   = reinterpret_cast<const float16_t *>(_var->ptr_to_element(Coordinates(0, 0)));
-    const auto input_gamma = reinterpret_cast<const float16_t *>(_gamma->ptr_to_element(Coordinates(0, 0)));
-    const auto input_beta  = reinterpret_cast<const float16_t *>(_beta->ptr_to_element(Coordinates(0, 0)));
+    const auto input_gamma = (_gamma != nullptr) ? reinterpret_cast<const float16_t *>(_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+    const auto input_beta  = (_beta != nullptr) ? reinterpret_cast<const float16_t *>(_beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
 
     float16x8_t       mean_vec    = vdupq_n_f16(0.0);
     float16x8_t       var_vec     = vdupq_n_f16(0.0);
-    float16x8_t       gamma_vec   = vdupq_n_f16(0.0);
+    float16x8_t       gamma_vec   = vdupq_n_f16(1.0);
     float16x8_t       beta_vec    = vdupq_n_f16(0.0);
     float16x8_t       denominator = vdupq_n_f16(0.0);
     const float16x8_t epsilon_vec = vdupq_n_f16(_epsilon);
@@ -208,10 +238,16 @@
         if(slice != id.z())
         {
             // Construct vectors
-            mean_vec  = vdupq_n_f16(*(input_mean + id.z()));
-            var_vec   = vdupq_n_f16(*(input_var + id.z()));
-            gamma_vec = vdupq_n_f16(*(input_gamma + id.z()));
-            beta_vec  = vdupq_n_f16(*(input_beta + id.z()));
+            mean_vec = vdupq_n_f16(*(input_mean + id.z()));
+            var_vec  = vdupq_n_f16(*(input_var + id.z()));
+            if(input_gamma != nullptr)
+            {
+                gamma_vec = vdupq_n_f16(*(input_gamma + id.z()));
+            }
+            if(input_beta != nullptr)
+            {
+                beta_vec = vdupq_n_f16(*(input_beta + id.z()));
+            }
 
             // Calculate denominator
             denominator = vinvsqrtq_f16(vaddq_f16(var_vec, epsilon_vec));
@@ -241,12 +277,12 @@
 
     const auto input_mean  = reinterpret_cast<const float *>(_mean->ptr_to_element(Coordinates(0, 0)));
     const auto input_var   = reinterpret_cast<const float *>(_var->ptr_to_element(Coordinates(0, 0)));
-    const auto input_gamma = reinterpret_cast<const float *>(_gamma->ptr_to_element(Coordinates(0, 0)));
-    const auto input_beta  = reinterpret_cast<const float *>(_beta->ptr_to_element(Coordinates(0, 0)));
+    const auto input_gamma = (_gamma != nullptr) ? reinterpret_cast<const float *>(_gamma->ptr_to_element(Coordinates(0, 0))) : nullptr;
+    const auto input_beta  = (_beta != nullptr) ? reinterpret_cast<const float *>(_beta->ptr_to_element(Coordinates(0, 0))) : nullptr;
 
     float32x4_t       mean_vec    = vdupq_n_f32(0.0);
     float32x4_t       var_vec     = vdupq_n_f32(0.0);
-    float32x4_t       gamma_vec   = vdupq_n_f32(0.0);
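+    // As in the fixed-point paths, gamma defaults to the identity scale of 1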
+    float32x4_t       gamma_vec   = vdupq_n_f32(1.0);
     float32x4_t       beta_vec    = vdupq_n_f32(0.0);
     float32x4_t       denominator = vdupq_n_f32(0.0);
     const float32x4_t epsilon_vec = vdupq_n_f32(_epsilon);
@@ -255,10 +291,16 @@
         if(slice != id.z())
         {
             // Construct vectors
-            mean_vec  = vdupq_n_f32(*(input_mean + id.z()));
-            var_vec   = vdupq_n_f32(*(input_var + id.z()));
-            gamma_vec = vdupq_n_f32(*(input_gamma + id.z()));
-            beta_vec  = vdupq_n_f32(*(input_beta + id.z()));
+            mean_vec = vdupq_n_f32(*(input_mean + id.z()));
+            var_vec  = vdupq_n_f32(*(input_var + id.z()));
+            if(input_gamma != nullptr)
+            {
+                gamma_vec = vdupq_n_f32(*(input_gamma + id.z()));
+            }
+            if(input_beta != nullptr)
+            {
+                beta_vec = vdupq_n_f32(*(input_beta + id.z()));
+            }
 
             // Calculate denominator
             denominator = vinvsqrtq_f32(vaddq_f32(var_vec, epsilon_vec));
@@ -335,21 +377,12 @@
                                                 const ITensor *beta, const ITensor *gamma,
                                                 float epsilon, ActivationLayerInfo act_info)
 {
-    ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var, beta, gamma);
+    ARM_COMPUTE_ERROR_ON_NULLPTR(input, mean, var);
 
-    ITensorInfo *output_info = nullptr;
-
-    if(nullptr != output)
-    {
-        // Output tensor auto initialization if not yet initialized
-        auto_init_if_empty(*output->info(), *input->info());
-
-        output_info = output->info();
-    }
-
-    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), output_info,
+    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), (output != nullptr) ? output->info() : nullptr,
                                                   mean->info(), var->info(),
-                                                  beta->info(), gamma->info(),
+                                                  (beta != nullptr) ? beta->info() : nullptr,
+                                                  (gamma != nullptr) ? gamma->info() : nullptr,
                                                   epsilon, act_info));
 
     _input    = input;
@@ -361,7 +394,8 @@
     _epsilon  = epsilon;
     _act_info = act_info;
 
-    if(output != nullptr)
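+    // Treat a missing or aliased output tensor as an in-place run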
+    const bool run_in_place = (output == nullptr) || (output == input);
+    if(!run_in_place)
     {
         _output = output;
     }
@@ -377,7 +411,7 @@
     }
 
     // Configure kernel window
-    auto win_config = validate_and_configure_window(input->info(), output_info);
+    auto win_config = validate_and_configure_window(input->info(), (run_in_place) ? nullptr : output->info());
     ARM_COMPUTE_ERROR_THROW_ON(win_config.first);
     INEKernel::configure(win_config.second);
 }
diff --git a/tests/AssetsLibrary.h b/tests/AssetsLibrary.h
index ae88824..4bbe4c5 100644
--- a/tests/AssetsLibrary.h
+++ b/tests/AssetsLibrary.h
@@ -352,6 +352,16 @@
     template <typename T>
     void fill_layer_data(T &&tensor, std::string name) const;
 
+    /** Fill a tensor with a constant value
+     *
+     * @param[in, out] tensor Tensor to be filled.
+     * @param[in]      value  Value assigned to all elements of @p tensor.
+     *
+     * @note @p value must be of the same data type as @p tensor
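+     *
+     * For example, library->fill_tensor_value(tensor, 1.f) sets every element of @p tensor to 1.f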
+     */
+    template <typename T, typename D>
+    void fill_tensor_value(T &&tensor, D value) const;
+
 private:
     // Function prototype to convert between image formats.
     using Converter = void (*)(const RawTensor &src, RawTensor &dst);
@@ -774,6 +784,12 @@
         });
     }
 }
+
+template <typename T, typename D>
+void AssetsLibrary::fill_tensor_value(T &&tensor, D value) const
+{
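+    // A uniform distribution over the degenerate range [value, value] produces a constant fill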
+    fill_tensor_uniform(tensor, 0, value, value);
+}
 } // namespace test
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_TEST_TENSOR_LIBRARY_H__ */
diff --git a/tests/benchmark/CL/BatchNormalizationLayer.cpp b/tests/benchmark/CL/BatchNormalizationLayer.cpp
index 82c7800..3312319 100644
--- a/tests/benchmark/CL/BatchNormalizationLayer.cpp
+++ b/tests/benchmark/CL/BatchNormalizationLayer.cpp
@@ -51,19 +51,25 @@
 TEST_SUITE(CL)
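+// Each dataset entry is expanded with every (UseGamma, UseBeta) combination, so every
+// network configuration is benchmarked both with and without the optional parameters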
 
 REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", 1)));
 
 REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", 1)));
 
 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", 1)));
@@ -71,19 +77,25 @@
 TEST_SUITE(NIGHTLY)
 
 REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", { 4, 8 })));
 
 REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", { 4, 8 })));
 
 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, CLBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", { 4, 8 })));
diff --git a/tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp b/tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp
index 6e5836e..9a2950b 100644
--- a/tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp
+++ b/tests/benchmark/GLES_COMPUTE/BatchNormalizationLayer.cpp
@@ -51,19 +51,25 @@
 TEST_SUITE(GC)
 
 REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", 1)));
 
 REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(), framework::dataset::make("ActivationInfo",
-                                                                                                                    ActivationLayerInfo())),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
+                                                                                                                    framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", 1)));
 
 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", 1)));
@@ -71,19 +77,25 @@
 TEST_SUITE(NIGHTLY)
 
 REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", { 4, 8 })));
 
 REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(), framework::dataset::make("ActivationInfo",
-                                                                                                                    ActivationLayerInfo())),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
+                                                                                                                    framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", { 4, 8 })));
 
 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, GCBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", { 4, 8 })));
diff --git a/tests/benchmark/NEON/BatchNormalizationLayer.cpp b/tests/benchmark/NEON/BatchNormalizationLayer.cpp
index 6d28318..786a5b1 100644
--- a/tests/benchmark/NEON/BatchNormalizationLayer.cpp
+++ b/tests/benchmark/NEON/BatchNormalizationLayer.cpp
@@ -56,36 +56,48 @@
 TEST_SUITE(NEON)
 
 REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", 1)));
 REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", 1)));
 
 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::ALL,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", 1)));
 
 TEST_SUITE(NIGHTLY)
 REGISTER_FIXTURE_DATA_TEST_CASE(MobileNetBatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::MobileNetBatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", { 4, 8 })));
 REGISTER_FIXTURE_DATA_TEST_CASE(YOLOV2BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::YOLOV2BatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", { 4, 8 })));
 
 REGISTER_FIXTURE_DATA_TEST_CASE(GoogLeNetInceptionV4BatchNormalizationLayer, NEBatchNormalizationLayerFixture, framework::DatasetMode::NIGHTLY,
-                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+                                framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(framework::dataset::combine(datasets::GoogLeNetInceptionV4BatchNormalizationLayerDataset(),
+                                                                                                                    framework::dataset::combine(framework::dataset::make("UseGamma", { false, true }),
+                                                                                                                            framework::dataset::make("UseBeta", { false, true }))),
                                                                                                                     framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                         data_types),
                                                             framework::dataset::make("Batches", { 4, 8 })));
diff --git a/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h b/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
index fbb7700..c55bb2a 100644
--- a/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/benchmark/fixtures/BatchNormalizationLayerFixture.h
@@ -42,7 +42,7 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape tensor_shape, TensorShape param_shape, float epsilon, ActivationLayerInfo act_info, DataType data_type, int batches)
+    void setup(TensorShape tensor_shape, TensorShape param_shape, float epsilon, bool use_gamma, bool use_beta, ActivationLayerInfo act_info, DataType data_type, int batches)
     {
         // Set batched in source and destination shapes
         const unsigned int fixed_point_position = 4;
@@ -57,7 +57,9 @@
         gamma    = create_tensor<TensorType>(param_shape, data_type, 1, fixed_point_position);
 
         // Create and configure function
-        batch_norm_layer.configure(&src, &dst, &mean, &variance, &beta, &gamma, epsilon, act_info);
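+        // Passing nullptr for beta/gamma exercises the defaults of 0 and 1 respectively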
+        TensorType *beta_ptr  = use_beta ? &beta : nullptr;
+        TensorType *gamma_ptr = use_gamma ? &gamma : nullptr;
+        batch_norm_layer.configure(&src, &dst, &mean, &variance, beta_ptr, gamma_ptr, epsilon, act_info);
 
         // Allocate tensors
         src.allocator()->allocate();
diff --git a/tests/validation/CL/BatchNormalizationLayer.cpp b/tests/validation/CL/BatchNormalizationLayer.cpp
index ef53515..8c14306 100644
--- a/tests/validation/CL/BatchNormalizationLayer.cpp
+++ b/tests/validation/CL/BatchNormalizationLayer.cpp
@@ -61,8 +61,11 @@
 template <typename T>
 using CLBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::RandomBatchNormalizationLayerDataset(), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F16, DataType::F32 })),
-               shape0, shape1, epsilon, dt)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                                           combine(framework::dataset::make("UseBeta", { false, true }),
+                                                                                   framework::dataset::make("UseGamma", { false, true }))),
+                                                                   framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F16, DataType::F32 })),
+               shape0, shape1, epsilon, use_beta, use_gamma, dt)
 {
     // Set fixed point position data type allowed
     const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
@@ -77,7 +80,9 @@
 
     // Create and Configure function
     CLBatchNormalizationLayer norm;
-    norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+    CLTensor                 *beta_ptr  = use_beta ? &beta : nullptr;
+    CLTensor                 *gamma_ptr = use_gamma ? &gamma : nullptr;
+    norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon);
 
     // Validate valid region
     const ValidRegion valid_region = shape_to_valid_region(shape0);
@@ -150,7 +155,9 @@
 
 TEST_SUITE(Float)
 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                                                                                   combine(framework::dataset::make("UseBeta", { false, true }),
+                                                                                                                           framework::dataset::make("UseGamma", { false, true }))),
                                                                                                                    act_infos),
                                                                                                                    framework::dataset::make("DataType", DataType::F32)))
 {
@@ -160,7 +167,9 @@
 TEST_SUITE_END()
 
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                                                                                  combine(framework::dataset::make("UseBeta", { false, true }),
+                                                                                                                          framework::dataset::make("UseGamma", { false, true }))),
                                                                                                                   framework::dataset::make("ActivationInfo", ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::BOUNDED_RELU, 6.f))),
                                                                                                                   framework::dataset::make("DataType", DataType::F16)))
 {
@@ -175,10 +184,13 @@
 using CLBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<CLTensor, CLAccessor, CLBatchNormalizationLayer, T>;
 
 TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
-                       framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
-                       framework::dataset::make("DataType", DataType::QS8)),
-                       framework::dataset::make("FractionalBits", 1, 6)))
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                               framework::dataset::make("UseBeta", false)),
+                                                       framework::dataset::make("UseGamma", false)),
+                                               framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+                                       framework::dataset::make("DataType", DataType::QS8)),
+                               framework::dataset::make("FractionalBits", 1, 6)))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qs8, 0);
@@ -186,10 +198,13 @@
 TEST_SUITE_END()
 
 TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
-                       framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
-                       framework::dataset::make("DataType", DataType::QS16)),
-                       framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(Random, CLBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                               framework::dataset::make("UseBeta", false)),
+                                                       framework::dataset::make("UseGamma", false)),
+                                               framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+                                       framework::dataset::make("DataType", DataType::QS16)),
+                               framework::dataset::make("FractionalBits", 1, 14)))
 {
     // Validate output
     validate(CLAccessor(_target), _reference, tolerance_qs16, 0);
diff --git a/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp b/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
index d817fc0..2dbb0e0 100644
--- a/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
+++ b/tests/validation/GLES_COMPUTE/BatchNormalizationLayer.cpp
@@ -59,8 +59,11 @@
 template <typename T>
 using GCBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<GCTensor, GCAccessor, GCBatchNormalizationLayer, T>;
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::RandomBatchNormalizationLayerDataset(), framework::dataset::make("DataType", { DataType::F32 })),
-               shape0, shape1, epsilon, dt)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                                           combine(framework::dataset::make("UseBeta", { false, true }),
+                                                                                   framework::dataset::make("UseGamma", { false, true }))),
+                                                                   framework::dataset::make("DataType", { DataType::F32 })),
+               shape0, shape1, epsilon, use_beta, use_gamma, dt)
 {
     // Set fixed point position data type allowed
     int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
@@ -75,7 +78,9 @@
 
     // Create and Configure function
     GCBatchNormalizationLayer norm;
-    norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+    GCTensor                 *beta_ptr  = use_beta ? &beta : nullptr;
+    GCTensor                 *gamma_ptr = use_gamma ? &gamma : nullptr;
+    norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon);
 
     // Validate valid region
     const ValidRegion valid_region = shape_to_valid_region(shape0);
@@ -84,7 +89,9 @@
 
 TEST_SUITE(Float)
 TEST_SUITE(FP16)
-FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                                                                                  combine(framework::dataset::make("UseBeta", { false, true }),
+                                                                                                                          framework::dataset::make("UseGamma", { false, true }))),
                                                                                                                   act_infos),
                                                                                                                   framework::dataset::make("DataType", DataType::F16)))
 {
@@ -94,7 +101,9 @@
 TEST_SUITE_END()
 
 TEST_SUITE(FP32)
-FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, GCBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                                                                                   combine(framework::dataset::make("UseBeta", { false, true }),
+                                                                                                                           framework::dataset::make("UseGamma", { false, true }))),
                                                                                                                    act_infos),
                                                                                                                    framework::dataset::make("DataType", DataType::F32)))
 {
diff --git a/tests/validation/NEON/BatchNormalizationLayer.cpp b/tests/validation/NEON/BatchNormalizationLayer.cpp
index 054ed27..7bf1f26 100644
--- a/tests/validation/NEON/BatchNormalizationLayer.cpp
+++ b/tests/validation/NEON/BatchNormalizationLayer.cpp
@@ -63,8 +63,10 @@
 template <typename T>
 using NEBatchNormalizationLayerFixture = BatchNormalizationLayerValidationFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;
 
-DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(datasets::RandomBatchNormalizationLayerDataset(), framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F32 })),
-               shape0, shape1, epsilon, dt)
+DATA_TEST_CASE(Configuration, framework::DatasetMode::ALL, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                                           combine(framework::dataset::make("UseBeta", { false, true }), framework::dataset::make("UseGamma", { false, true }))),
+                                                                   framework::dataset::make("DataType", { DataType::QS8, DataType::QS16, DataType::F32 })),
+               shape0, shape1, epsilon, use_beta, use_gamma, dt)
 {
     // Set fixed point position data type allowed
     const int fixed_point_position = (arm_compute::is_data_type_fixed_point(dt)) ? 3 : 0;
@@ -79,7 +81,9 @@
 
     // Create and Configure function
     NEBatchNormalizationLayer norm;
-    norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon);
+    Tensor                   *beta_ptr  = use_beta ? &beta : nullptr;
+    Tensor                   *gamma_ptr = use_gamma ? &gamma : nullptr;
+    norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon);
 
     // Validate valid region
     const ValidRegion valid_region = shape_to_valid_region(shape0);
@@ -150,7 +154,9 @@
 // *INDENT-ON*
 
 TEST_SUITE(Float)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<float>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                                                                                   combine(framework::dataset::make("UseBeta", { false, true }),
+                                                                                                                           framework::dataset::make("UseGamma", { false, true }))),
                                                                                                                    act_infos),
                                                                                                                    framework::dataset::make("DataType", DataType::F32)))
 {
@@ -161,7 +167,9 @@
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 TEST_SUITE(Float16)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixture<half>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                                                                                  combine(framework::dataset::make("UseBeta", { false, true }),
+                                                                                                                          framework::dataset::make("UseGamma", { false, true }))),
                                                                                                                   framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
                                                                                                                   framework::dataset::make("DataType", DataType::F16)))
 {
@@ -176,10 +184,13 @@
 using NEBatchNormalizationLayerFixedPointFixture = BatchNormalizationLayerValidationFixedPointFixture<Tensor, Accessor, NEBatchNormalizationLayer, T>;
 
 TEST_SUITE(QS8)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
-                       framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
-                       framework::dataset::make("DataType", DataType::QS8)),
-                       framework::dataset::make("FractionalBits", 1, 6)))
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int8_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                               framework::dataset::make("UseBeta", false)),
+                                                       framework::dataset::make("UseGamma", false)),
+                                               framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+                                       framework::dataset::make("DataType", DataType::QS8)),
+                               framework::dataset::make("FractionalBits", 1, 6)))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qs8, 0);
@@ -187,10 +198,13 @@
 TEST_SUITE_END()
 
 TEST_SUITE(QS16)
-FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT, combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
-                       framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
-                       framework::dataset::make("DataType", DataType::QS16)),
-                       framework::dataset::make("FractionalBits", 1, 14)))
+FIXTURE_DATA_TEST_CASE(Random, NEBatchNormalizationLayerFixedPointFixture<int16_t>, framework::DatasetMode::PRECOMMIT,
+                       combine(combine(combine(combine(combine(datasets::RandomBatchNormalizationLayerDataset(),
+                                                               framework::dataset::make("UseBeta", false)),
+                                                       framework::dataset::make("UseGamma", false)),
+                                               framework::dataset::make("ActivationInfo", ActivationLayerInfo())),
+                                       framework::dataset::make("DataType", DataType::QS16)),
+                               framework::dataset::make("FractionalBits", 1, 14)))
 {
     // Validate output
     validate(Accessor(_target), _reference, tolerance_qs16, 0);
diff --git a/tests/validation/fixtures/BatchNormalizationLayerFixture.h b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
index e02c619..4a6ac1a 100644
--- a/tests/validation/fixtures/BatchNormalizationLayerFixture.h
+++ b/tests/validation/fixtures/BatchNormalizationLayerFixture.h
@@ -45,10 +45,12 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape0, TensorShape shape1, float epsilon, ActivationLayerInfo act_info, DataType dt, int fractional_bits)
+    void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt, int fractional_bits)
     {
         _fractional_bits = fractional_bits;
         _data_type       = dt;
+        _use_beta        = use_beta;
+        _use_gamma       = use_gamma;
         _target          = compute_target(shape0, shape1, epsilon, act_info, dt, fractional_bits);
         _reference       = compute_reference(shape0, shape1, epsilon, act_info, dt, fractional_bits);
     }
@@ -67,8 +69,24 @@
             library->fill(src_tensor, distribution, 0);
             library->fill(mean_tensor, distribution, 1);
             library->fill(var_tensor, distribution_var, 0);
-            library->fill(beta_tensor, distribution, 3);
-            library->fill(gamma_tensor, distribution, 4);
+            if(_use_beta)
+            {
+                library->fill(beta_tensor, distribution, 3);
+            }
+            else
+            {
+                // Fill with default value 0.f
+                library->fill_tensor_value(beta_tensor, 0.f);
+            }
+            if(_use_gamma)
+            {
+                library->fill(gamma_tensor, distribution, 4);
+            }
+            else
+            {
+                // Fill with default value 1.f
+                library->fill_tensor_value(gamma_tensor, 1.f);
+            }
         }
         else
         {
@@ -80,8 +98,24 @@
             library->fill(src_tensor, distribution, 0);
             library->fill(mean_tensor, distribution, 1);
             library->fill(var_tensor, distribution_var, 0);
-            library->fill(beta_tensor, distribution, 3);
-            library->fill(gamma_tensor, distribution, 4);
+            if(_use_beta)
+            {
+                library->fill(beta_tensor, distribution, 3);
+            }
+            else
+            {
+                // Fill with default value 0
+                library->fill_tensor_value(beta_tensor, static_cast<T>(0));
+            }
+            if(_use_gamma)
+            {
+                library->fill(gamma_tensor, distribution, 4);
+            }
+            else
+            {
+                // Fill with default value 1
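+                // (in QSn fixed point, 1.0f is represented as 1 << fractional_bits)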
+                library->fill_tensor_value(gamma_tensor, static_cast<T>(1 << (_fractional_bits)));
+            }
         }
     }
 
@@ -97,7 +131,9 @@
 
         // Create and configure function
         FunctionType norm;
-        norm.configure(&src, &dst, &mean, &var, &beta, &gamma, epsilon, act_info);
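+        // A null beta/gamma pointer selects the kernel's built-in defaults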
+        TensorType *beta_ptr  = _use_beta ? &beta : nullptr;
+        TensorType *gamma_ptr = _use_gamma ? &gamma : nullptr;
+        norm.configure(&src, &dst, &mean, &var, beta_ptr, gamma_ptr, epsilon, act_info);
 
         ARM_COMPUTE_EXPECT(src.info()->is_resizable(), framework::LogLevel::ERRORS);
         ARM_COMPUTE_EXPECT(dst.info()->is_resizable(), framework::LogLevel::ERRORS);
@@ -149,6 +185,8 @@
     SimpleTensor<T> _reference{};
     int             _fractional_bits{};
     DataType        _data_type{};
+    bool            _use_beta{};
+    bool            _use_gamma{};
 };
 
 template <typename TensorType, typename AccessorType, typename FunctionType, typename T>
@@ -156,9 +194,9 @@
 {
 public:
     template <typename...>
-    void setup(TensorShape shape0, TensorShape shape1, float epsilon, ActivationLayerInfo act_info, DataType dt)
+    void setup(TensorShape shape0, TensorShape shape1, float epsilon, bool use_beta, bool use_gamma, ActivationLayerInfo act_info, DataType dt)
     {
-        BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, act_info, dt, 0);
+        BatchNormalizationLayerValidationFixedPointFixture<TensorType, AccessorType, FunctionType, T>::setup(shape0, shape1, epsilon, use_beta, use_gamma, act_info, dt, 0);
     }
 };
 } // namespace validation
diff --git a/tests/validation/reference/BatchNormalizationLayer.cpp b/tests/validation/reference/BatchNormalizationLayer.cpp
index a9d9f03..c8badac 100644
--- a/tests/validation/reference/BatchNormalizationLayer.cpp
+++ b/tests/validation/reference/BatchNormalizationLayer.cpp
@@ -106,7 +106,6 @@
                     const float numerator   = src[pos] - mean[i];
                     const float x_bar       = numerator / denominator;
                     result[pos]             = beta[i] + x_bar * gamma[i];
-                    ;
                 }
             }
         }