CLInstanceNormalizationLayer NHWC optimisation

* Split the workload into two kernels: the first precomputes the mean and
  variance, and the second loads these precomputed values to normalise the
  input (see the sketch below).

* The new approach runs 30% faster than the original code for NHWC workloads
  such as 32x192x256.

* Resolves MLCE-337
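
* For reference, a minimal CPU sketch of the two-pass structure the split maps
  to. This is illustrative only: it walks a plain float NHWC buffer on the
  host and is not the OpenCL kernel code; the helper name instance_norm_nhwc
  and its signature are assumptions, not part of this change.

    #include <cmath>
    #include <cstddef>
    #include <vector>

    void instance_norm_nhwc(const float *in, float *out,
                            size_t n, size_t h, size_t w, size_t c,
                            float gamma, float beta, float epsilon)
    {
        const size_t hw = h * w;
        std::vector<float> mean(n * c);
        std::vector<float> var(n * c);

        // Pass 1: per-instance, per-channel mean and variance
        // (the role of the new mean/variance kernel).
        for(size_t b = 0; b < n; ++b)
        {
            for(size_t ch = 0; ch < c; ++ch)
            {
                float sum    = 0.f;
                float sum_sq = 0.f;
                for(size_t i = 0; i < hw; ++i)
                {
                    const float v = in[(b * hw + i) * c + ch];
                    sum += v;
                    sum_sq += v * v;
                }
                const float m    = sum / hw;
                mean[b * c + ch] = m;
                var[b * c + ch]  = sum_sq / hw - m * m;
            }
        }

        // Pass 2: normalise using the precomputed statistics
        // (the role of the instance normalization kernel).
        for(size_t b = 0; b < n; ++b)
        {
            for(size_t ch = 0; ch < c; ++ch)
            {
                const float m     = mean[b * c + ch];
                const float scale = gamma / std::sqrt(var[b * c + ch] + epsilon);
                for(size_t i = 0; i < hw; ++i)
                {
                    const size_t idx = (b * hw + i) * c + ch;
                    out[idx]         = (in[idx] - m) * scale + beta;
                }
            }
        }
    }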

Change-Id: I8356fcefa2d131ab4dcb32268ce7142421d073e4
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5355
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
diff --git a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
index 9bc060e..f2406d6 100644
--- a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -23,13 +23,24 @@
  */
 #include "arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h"
 
+#include "arm_compute/core/Error.h"
 #include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLHelpers.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "src/core/CL/ICLKernel.h"
 #include "src/core/CL/kernels/CLFillBorderKernel.h"
 #include "src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h"
 
 namespace arm_compute
 {
-CLInstanceNormalizationLayer::CLInstanceNormalizationLayer()
+CLInstanceNormalizationLayer::CLInstanceNormalizationLayer(CLRuntimeContext *ctx) // NOLINT
+    : _inst_norm_kernel(),
+      _mean_var_kernel(),
+      _mean_var_tensor(),
+      _ctx(ctx)
+{
+}
+CLInstanceNormalizationLayer::~CLInstanceNormalizationLayer()
 {
 }
 
@@ -40,13 +51,25 @@
 
 void CLInstanceNormalizationLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float gamma, float beta, float epsilon, bool use_mixed_precision)
 {
-    auto k = std::make_unique<CLInstanceNormalizationLayerKernel>();
-    k->configure(compile_context, input, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
-    _kernel = std::move(k);
+    auto w = std::make_unique<CLComputeMeanVariance>();
+    w->configure(compile_context, input, &_mean_var_tensor);
+    _mean_var_kernel = std::move(w);
+    auto k           = std::make_unique<CLInstanceNormalizationLayerKernel>();
+    k->configure(compile_context, input, &_mean_var_tensor, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
+    _inst_norm_kernel = std::move(k);
+    _mean_var_tensor.allocator()->allocate();
 }
 
 Status CLInstanceNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, float gamma, float beta, float epsilon, bool use_mixed_precision)
 {
     return CLInstanceNormalizationLayerKernel::validate(input, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
 }
-} // namespace arm_compute
\ No newline at end of file
+
+void CLInstanceNormalizationLayer::run()
+{
+    ARM_COMPUTE_ERROR_ON_MSG(!_inst_norm_kernel, "The child class didn't set the CL kernel or function isn't configured");
+    schedule_kernel_on_ctx(_ctx, _mean_var_kernel.get());
+    schedule_kernel_on_ctx(_ctx, _inst_norm_kernel.get());
+}
+
+} // namespace arm_compute