CLInstanceNormalizationLayer NHWC optimisation
* Split the workload into two kernels: the first precomputes the mean and
variance of each plane, and the second loads these precomputed values to
perform the normalization (see the sketch below).
* The new approach runs 30% faster than the original code for NHWC workloads
such as 32x192x256.
* Resolves MLCE-337
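* For reference, a hypothetical plain-C++ model of the two-pass scheme the
kernels implement (illustrative only, not the OpenCL code):

    #include <cmath>
    #include <vector>

    // Pass 1 models compute_mean_var: one (batch, channel) plane per GPU
    // work-item in the real kernel. Pass 2 models instance_normalization,
    // which now only reloads the precomputed statistics.
    void instance_norm_two_pass(std::vector<float> &x, int N, int H, int W, int C,
                                float gamma, float beta, float epsilon)
    {
        std::vector<float> mean(N * C), var(N * C);
        for(int n = 0; n < N; ++n)
        {
            for(int c = 0; c < C; ++c)
            {
                float sum = 0.f, sum_sq = 0.f;
                for(int i = 0; i < H * W; ++i)
                {
                    const float v = x[(n * H * W + i) * C + c]; // NHWC indexing
                    sum += v;
                    sum_sq += v * v;
                }
                const float m   = sum / (H * W);
                mean[n * C + c] = m;
                var[n * C + c]  = sum_sq / (H * W) - m * m;
            }
        }
        for(int n = 0; n < N; ++n)
        {
            for(int c = 0; c < C; ++c)
            {
                const float multip = gamma / std::sqrt(var[n * C + c] + epsilon);
                for(int i = 0; i < H * W; ++i)
                {
                    float &v = x[(n * H * W + i) * C + c];
                    v        = (v - mean[n * C + c]) * multip + beta;
                }
            }
        }
    }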
Change-Id: I8356fcefa2d131ab4dcb32268ce7142421d073e4
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5355
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
diff --git a/src/core/CL/CLKernelLibrary.cpp b/src/core/CL/CLKernelLibrary.cpp
index eef204f..002a144 100644
--- a/src/core/CL/CLKernelLibrary.cpp
+++ b/src/core/CL/CLKernelLibrary.cpp
@@ -356,6 +356,7 @@
{ "im2col9x9_nhwc", "im2col.cl" },
{ "im2col_generic_nhwc", "im2col.cl" },
{ "instance_normalization", "instance_normalization.cl" },
+ { "compute_mean_var", "instance_normalization.cl" },
{ "l2_normalize_x", "l2_normalize.cl" },
{ "l2_normalize_y", "l2_normalize.cl" },
{ "l2_normalize_z", "l2_normalize.cl" },
diff --git a/src/core/CL/cl_kernels/instance_normalization.cl b/src/core/CL/cl_kernels/instance_normalization.cl
index 480d9cd..d2507d9 100644
--- a/src/core/CL/cl_kernels/instance_normalization.cl
+++ b/src/core/CL/cl_kernels/instance_normalization.cl
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,6 +23,118 @@
*/
#include "helpers.h"
+#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z)
+/** This function computes the mean and variance of each plane of the input tensor and provides them as output.
+ *
+ * @attention Vector size should be given as a preprocessor argument using -DVEC_SIZE=size. e.g. -DVEC_SIZE=16
+ * @attention Data type should be passed using the -DDATA_TYPE=data_type compile flag, e.g. -DDATA_TYPE=float
+ * @attention Dimensions X, Y, and Z should be given as preprocessor arguments using -DDIM_X=value, -DDIM_Y=value and -DDIM_Z=value, e.g. -DDIM_X=6 -DDIM_Y=2 -DDIM_Z=7
+ *
+ * @param[in] input_ptr Pointer to the source tensor. Supported data types: F16/F32
+ * @param[in] input_stride_x Stride of the source tensor in X dimension (in bytes)
+ * @param[in] input_step_x input_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in] input_stride_y Stride of the source tensor in Y dimension (in bytes)
+ * @param[in] input_step_y input_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in] input_stride_z Stride of the source tensor in Z dimension (in bytes)
+ * @param[in] input_step_z input_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in] input_stride_w Stride of the source tensor in W dimension (in bytes)
+ * @param[in] input_step_w input_stride_w * number of elements along W processed per workitem (in bytes)
+ * @param[in] input_offset_first_element_in_bytes The offset of the first element in the source tensor
+ * @param[out] output_ptr Pointer to the destination tensor which stores the computed mean and variance. Supported data types: same as @p input_ptr
+ * @param[in] output_stride_x Stride of the destination tensor in X dimension (in bytes)
+ * @param[in] output_step_x output_stride_x * number of elements along X processed per workitem (in bytes)
+ * @param[in] output_stride_y Stride of the destination tensor in Y dimension (in bytes)
+ * @param[in] output_step_y output_stride_y * number of elements along Y processed per workitem (in bytes)
+ * @param[in] output_stride_z Stride of the destination tensor in Z dimension (in bytes)
+ * @param[in] output_step_z output_stride_z * number of elements along Z processed per workitem (in bytes)
+ * @param[in] output_offset_first_element_in_bytes The offset of the first element in the destination tensor
+ */
+__kernel void compute_mean_var(
+ TENSOR4D_DECLARATION(input),
+ TENSOR3D_DECLARATION(output))
+{
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
+ Tensor3D out = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output);
+
+#if defined(NHWC)
+ const int ch = get_global_id(0); // Current channel
+ const int batch = get_global_id(1); // Current batch
+ const int elements_plane = DIM_Y * DIM_Z;
+ float part_sum = 0.f;
+ float part_sum_sq = 0.f;
+ const int in_offset = input_offset_first_element_in_bytes + batch * input_stride_w + ch * sizeof(DATA_TYPE);
+ for(int i = 0; i < elements_plane; ++i)
+ {
+ const float data = *((__global DATA_TYPE *)(input_ptr + in_offset + i * input_stride_y));
+ part_sum += data;
+ part_sum_sq += data * data;
+ }
+ float mean = (part_sum / elements_plane);
+ float var = (part_sum_sq / elements_plane) - (mean * mean);
+ __global DATA_TYPE *output_address0 = (__global DATA_TYPE *)tensor3D_offset(&out, ch, 0, batch);
+ *output_address0 = mean;
+ __global DATA_TYPE *output_address1 = (__global DATA_TYPE *)tensor3D_offset(&out, ch, 1, batch);
+ *output_address1 = var;
+#else // !defined(NHWC)
+ const int ch = get_global_id(2) % DIM_Z; // Current channel
+ const int batch = get_global_id(2) / DIM_Z; // Current batch
+ const int elements_plane = DIM_X * DIM_Y;
+
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ part_sum = 0.f;
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ part_sum_sq = 0.f;
+ // Calculate partial sum
+ for(int y = 0; y < DIM_Y; ++y)
+ {
+ int x = 0;
+ for(; x <= (DIM_X - VEC_SIZE); x += VEC_SIZE)
+ {
+ // Load data
+ VEC_DATA_TYPE(float, VEC_SIZE)
+ data = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor4D_offset(&in, x, y, ch, batch)), VEC_DATA_TYPE(float, VEC_SIZE));
+ part_sum += data;
+ part_sum_sq += data * data;
+ }
+ // Left-overs loop
+ for(; x < DIM_X; ++x)
+ {
+ float data = (float)(*((__global DATA_TYPE *)tensor4D_offset(&in, x, y, ch, batch)));
+ part_sum.s0 += data;
+ part_sum_sq.s0 += data * data;
+ }
+ }
+ // Perform reduction
+#if VEC_SIZE > 8
+ part_sum.s01234567 += part_sum.s89abcdef;
+ part_sum_sq.s01234567 += part_sum_sq.s89abcdef;
+#endif // VEC_SIZE > 8
+#if VEC_SIZE > 4
+ part_sum.s0123 += part_sum.s4567;
+ part_sum_sq.s0123 += part_sum_sq.s4567;
+#endif // VEC_SIZE > 4
+#if VEC_SIZE > 2
+ part_sum.s01 += part_sum.s23;
+ part_sum_sq.s01 += part_sum_sq.s23;
+#endif // VEC_SIZE > 2
+ part_sum.s0 += part_sum.s1;
+ part_sum_sq.s0 += part_sum_sq.s1;
+
+ float sum = (float)part_sum.s0;
+ float sum_sq = (float)part_sum_sq.s0;
+
+ const float mean = (sum / elements_plane);
+ const float var = (sum_sq / elements_plane) - (mean * mean);
+
+ __global DATA_TYPE *output_address0 = (__global DATA_TYPE *)tensor3D_offset(&out, ch, 0, batch);
+ *output_address0 = mean;
+ __global DATA_TYPE *output_address1 = (__global DATA_TYPE *)tensor3D_offset(&out, ch, 1, batch);
+ *output_address1 = var;
+
+#endif // defined(NHWC)
+}
+#endif /* defined(VEC_SIZE) && defined(DATA_TYPE) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z) */
+
#if defined(VEC_SIZE) && defined(DATA_TYPE) && defined(INTERNAL_DATA_TYPE) && defined(GAMMA) && defined(BETA) && defined(EPSILON) && defined(DIM_X) && defined(DIM_Y) && defined(DIM_Z)
/** This function normalizes the input 2D tensor across the first dimension with respect to mean and standard deviation of the same dimension.
*
@@ -51,105 +163,50 @@
* @param[in] output_offset_first_element_in_bytes (Optional) The offset of the first element in the destination tensor
*/
__kernel void instance_normalization(
- TENSOR4D_DECLARATION(input)
+ TENSOR4D_DECLARATION(input),
+ TENSOR3D_DECLARATION(mean_var)
#ifndef IN_PLACE
,
TENSOR4D_DECLARATION(output)
#endif /* IN_PLACE */
)
{
- Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
+ Tensor4D in = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(input, 0);
+ Tensor3D mean_var = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(mean_var);
#ifndef IN_PLACE
Tensor4D out = CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(output, 0);
#endif /* IN_PLACE */
- INTERNAL_DATA_TYPE sum = 0.f;
- INTERNAL_DATA_TYPE sum_sq = 0.f;
+#if defined(NHWC)
+ const int ch = get_global_id(0); // Current channel
+ const int batch = get_global_id(2); // Current batch
+#else /* defined(NHWC) */
+ const int ch = get_global_id(2) % DIM_Z; // Current channel
+ const int batch = get_global_id(2) / DIM_Z; // Current batch
+#endif /* defined(NHWC) */
+
+ const __global DATA_TYPE *mean_ptr = (__global DATA_TYPE *)tensor3D_offset(&mean_var, ch, 0, batch);
+ const __global DATA_TYPE *var_ptr = (__global DATA_TYPE *)tensor3D_offset(&mean_var, ch, 1, batch);
+ const INTERNAL_DATA_TYPE mean = (INTERNAL_DATA_TYPE)(*mean_ptr);
+ const INTERNAL_DATA_TYPE var = (INTERNAL_DATA_TYPE)(*var_ptr);
+ const INTERNAL_DATA_TYPE multip = GAMMA / sqrt(var + EPSILON);
+ const INTERNAL_DATA_TYPE beta = (INTERNAL_DATA_TYPE)BETA;
#if defined(NHWC)
-
- const int ch = get_global_id(0); // Current channel
- const int batch = get_global_id(2); // Current batch
- const int elements_plane = DIM_Y * DIM_Z;
-
- for(int i_w = 0; i_w < DIM_Y; ++i_w)
- {
- for(int i_h = 0; i_h < DIM_Z; ++i_h)
- {
- INTERNAL_DATA_TYPE data = (INTERNAL_DATA_TYPE) * ((__global DATA_TYPE *)tensor4D_offset(&in, ch, i_w, i_h, batch));
- sum += data;
- sum_sq += data * data;
- }
- }
-
-#else // !defined(NHWC)
- const int ch = get_global_id(2) % DIM_Z; // Current channel
- const int batch = get_global_id(2) / DIM_Z; // Current batch
- const int elements_plane = DIM_X * DIM_Y;
-
- VEC_DATA_TYPE(INTERNAL_DATA_TYPE, VEC_SIZE)
- part_sum = 0.f;
- VEC_DATA_TYPE(INTERNAL_DATA_TYPE, VEC_SIZE)
- part_sum_sq = 0.f;
- // Calculate partial sum
- for(int y = 0; y < DIM_Y; ++y)
- {
- int x = 0;
- for(; x <= (DIM_X - VEC_SIZE); x += VEC_SIZE)
- {
- // Load data
- VEC_DATA_TYPE(INTERNAL_DATA_TYPE, VEC_SIZE)
- data = CONVERT(VLOAD(VEC_SIZE)(0, (__global DATA_TYPE *)tensor4D_offset(&in, x, y, ch, batch)), VEC_DATA_TYPE(INTERNAL_DATA_TYPE, VEC_SIZE));
- part_sum += data;
- part_sum_sq += data * data;
- }
- // Left-overs loop
- for(; x < DIM_X; ++x)
- {
- INTERNAL_DATA_TYPE data = (INTERNAL_DATA_TYPE)(*((__global DATA_TYPE *)tensor4D_offset(&in, x, y, ch, batch)));
- part_sum.s0 += data;
- part_sum_sq.s0 += data * data;
- }
- }
- // Perform reduction
-#if VEC_SIZE > 8
- part_sum.s01234567 += part_sum.s89abcdef;
- part_sum_sq.s01234567 += part_sum_sq.s89abcdef;
-#endif // VEC_SIZE > 8
-#if VEC_SIZE > 4
- part_sum.s0123 += part_sum.s4567;
- part_sum_sq.s0123 += part_sum_sq.s4567;
-#endif // VEC_SIZE > 4
-#if VEC_SIZE > 2
- part_sum.s01 += part_sum.s23;
- part_sum_sq.s01 += part_sum_sq.s23;
-#endif // VEC_SIZE > 2
- part_sum.s0 += part_sum.s1;
- part_sum_sq.s0 += part_sum_sq.s1;
-
- sum = (INTERNAL_DATA_TYPE)part_sum.s0;
- sum_sq = (INTERNAL_DATA_TYPE)part_sum_sq.s0;
-
-#endif // defined(NHWC)
-
- const INTERNAL_DATA_TYPE mean = (sum / elements_plane);
- const INTERNAL_DATA_TYPE var = (sum_sq / elements_plane) - (mean * mean);
- const INTERNAL_DATA_TYPE multip = GAMMA / sqrt(var + EPSILON);
-
-#if defined(NHWC)
-
- for(int i_w = 0; i_w < DIM_Y; ++i_w)
- {
- for(int i_h = 0; i_h < DIM_Z; ++i_h)
- {
- __global DATA_TYPE *input_address = (__global DATA_TYPE *)tensor4D_offset(&in, ch, i_w, i_h, batch);
-#ifdef IN_PLACE
- __global DATA_TYPE *output_address = input_address;
-#else /* !IN_PLACE */
- __global DATA_TYPE *output_address = (__global DATA_TYPE *)tensor4D_offset(&out, ch, i_w, i_h, batch);
+ const int in_offset = input_offset_first_element_in_bytes + batch * input_stride_w + ch * sizeof(DATA_TYPE);
+#ifndef IN_PLACE
+ const int out_offset = output_offset_first_element_in_bytes + batch * output_stride_w + ch * sizeof(DATA_TYPE);
#endif /* IN_PLACE */
- *(output_address) = (*(input_address) - mean) * multip + (INTERNAL_DATA_TYPE)BETA;
- }
+
+ for(int i = 0; i < (DIM_Y * DIM_Z); ++i)
+ {
+ __global DATA_TYPE *input_address = (__global DATA_TYPE *)(input_ptr + in_offset + i * input_stride_y);
+#ifdef IN_PLACE
+ __global DATA_TYPE *output_address = input_address;
+#else /* !IN_PLACE */
+ __global DATA_TYPE *output_address = (__global DATA_TYPE *)(output_ptr + out_offset + i * output_stride_y);
+#endif /* IN_PLACE */
+ *(output_address) = (*(input_address) - mean) * multip + beta;
}
#else // !defined(NHWC)
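
The NHWC fast path above replaces per-element tensor4D_offset arithmetic with
one precomputed byte offset per (batch, channel) plane plus a single strided
loop; a hypothetical CPU analogue of that addressing (assumes a dense layout,
i.e. stride_z == dim_y * stride_y, which is what lets Y and Z collapse into
one loop):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical CPU analogue of the NHWC addressing in both kernels above:
    // one base byte offset per (batch, channel) plane, then dim_y * dim_z
    // loads strided by stride_y.
    static float plane_sum(const uint8_t *ptr, size_t offset_first_element,
                           size_t stride_y, size_t stride_w,
                           int dim_y, int dim_z, int ch, int batch)
    {
        const uint8_t *base = ptr + offset_first_element + batch * stride_w + ch * sizeof(float);
        float          sum  = 0.f;
        for(int i = 0; i < dim_y * dim_z; ++i)
        {
            sum += *reinterpret_cast<const float *>(base + i * stride_y);
        }
        return sum;
    }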
diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
index 50c4e24..80a42cc 100644
--- a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
+++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.cpp
@@ -32,7 +32,6 @@
#include "src/core/CL/CLValidate.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"
-
#include "support/StringSupport.h"
namespace arm_compute
@@ -54,19 +53,28 @@
return Status{};
}
+
+Status validate_arguments_meanvar(const ITensorInfo *input, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
+
+ if(output != nullptr && output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(), "Input and output have different number of channels");
+ }
+
+ return Status{};
+}
} // namespace
-CLInstanceNormalizationLayerKernel::CLInstanceNormalizationLayerKernel()
- : _input(nullptr), _output(nullptr), _run_in_place(false)
+CLComputeMeanVariance::CLComputeMeanVariance()
+ : _input(nullptr), _output(nullptr)
{
}
-void CLInstanceNormalizationLayerKernel::configure(ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info)
-{
- configure(CLKernelLibrary::get().get_compile_context(), input, output, info);
-}
-
-void CLInstanceNormalizationLayerKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info)
+void CLComputeMeanVariance::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output)
{
ARM_COMPUTE_ERROR_ON_NULLPTR(input);
auto padding_info = get_padding_info({ input, output });
@@ -74,6 +82,80 @@
_input = input;
_output = output == nullptr ? input : output;
+ ARM_COMPUTE_ERROR_THROW_ON(validate_arguments_meanvar(_input->info(), _output->info()));
+ const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
+
+ CLBuildOptions build_opts;
+ build_opts.add_option("-DDATA_TYPE=" + get_cl_type_from_data_type(input->info()->data_type()));
+ build_opts.add_option("-DVEC_SIZE=" + support::cpp11::to_string(num_elems_processed_per_iteration));
+ build_opts.add_option("-DDIM_X=" + support::cpp11::to_string(input->info()->dimension(0)));
+ build_opts.add_option("-DDIM_Y=" + support::cpp11::to_string(input->info()->dimension(1)));
+ build_opts.add_option("-DDIM_Z=" + support::cpp11::to_string(input->info()->dimension(2)));
+ build_opts.add_option_if(_input->info()->data_layout() == DataLayout::NHWC, "-DNHWC");
+ // Create kernel
+ _kernel = create_kernel(compile_context, "compute_mean_var", build_opts.options());
+
+ // We handle the planes manually
+ Window win = calculate_max_window(*(input->info()), Steps(1));
+ const auto data_layout = input->info()->data_layout();
+ const unsigned int channel_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::CHANNEL);
+ const unsigned int batches_idx = get_data_layout_dimension_index(data_layout, DataLayoutDimension::BATCHES);
+ const unsigned int input_channel = input->info()->dimension(channel_idx);
+ const unsigned int input_batches = input->info()->dimension(batches_idx);
+ const TensorShape out_shape(input_channel, 2u, input_batches);
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*output->info(), out_shape, 1, input->info()->data_type());
+
+ ICLKernel::configure_internal(win);
+ ARM_COMPUTE_ERROR_ON(has_padding_changed(padding_info));
+}
+
+Status CLComputeMeanVariance::validate(const ITensorInfo *input, const ITensorInfo *output)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments_meanvar(input, output));
+ return Status{};
+}
+
+void CLComputeMeanVariance::run(const Window &window, cl::CommandQueue &queue)
+{
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(IKernel::window(), window);
+
+ Window collapsed_window = window.collapse(window, Window::DimZ);
+
+ // We will process the planes together
+ if(_input->info()->data_layout() == DataLayout::NCHW)
+ {
+ collapsed_window.set(Window::DimX, Window::Dimension(0, 1, 1));
+ collapsed_window.set(Window::DimY, Window::Dimension(0, 1, 1));
+ }
+ else
+ {
+ collapsed_window.set(Window::DimZ, Window::Dimension(0, 1, 1));
+ collapsed_window.set(Window::DimY, Window::Dimension(0, _input->info()->dimension(3), 1));
+ }
+ unsigned int idx = 0;
+ add_4D_tensor_argument(idx, _input, collapsed_window);
+ add_3D_tensor_argument(idx, _output, collapsed_window);
+
+ enqueue(queue, *this, collapsed_window, lws_hint());
+}
+
+CLInstanceNormalizationLayerKernel::CLInstanceNormalizationLayerKernel()
+ : _input(nullptr), _output(nullptr), _mean(nullptr), _run_in_place(false)
+{
+}
+
+void CLInstanceNormalizationLayerKernel::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *mean_var, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+ auto padding_info = get_padding_info({ input, output });
+
+ _input = input;
+ _output = output == nullptr ? input : output;
+ _mean = mean_var;
+
_run_in_place = (output == nullptr) || (output == input);
ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(_input->info(), _output->info(), info));
const unsigned int num_elems_processed_per_iteration = 16 / input->info()->element_size();
@@ -132,6 +214,8 @@
unsigned int idx = 0;
add_4D_tensor_argument(idx, _input, collapsed_window);
+ add_3D_tensor_argument(idx, _mean, collapsed_window);
+
if(!_run_in_place)
{
add_4D_tensor_argument(idx, _output, collapsed_window);
diff --git a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
index d4444f0..33a3ff9 100644
--- a/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
+++ b/src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -52,21 +52,14 @@
/** Set the input and output tensors.
*
- * @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
- * In case of @p output tensor = nullptr this tensor will store the result of the normalization.
- * @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
- * @param[in] info Kernel meta-data descriptor
- */
- void configure(ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info);
- /** Set the input and output tensors.
- *
* @param[in] compile_context The compile context to be used.
* @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
* In case of @p output tensor = nullptr this tensor will store the result of the normalization.
+ * @param[in] mean_var Tensor containing the precomputed mean and variance values. Data type supported: same as @p input.
* @param[out] output Destination tensor. Data types and data layouts supported: same as @p input.
* @param[in] info Kernel meta-data descriptor
*/
- void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info);
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *mean_var, ICLTensor *output, const InstanceNormalizationLayerKernelInfo &info);
/** Static function to check if given info will lead to a valid configuration of @ref CLInstanceNormalizationLayer.
*
@@ -84,7 +77,51 @@
private:
ICLTensor *_input;
ICLTensor *_output;
+ ICLTensor *_mean;
bool _run_in_place;
};
+
+/** Interface for compute Mean and Variance per channel */
+class CLComputeMeanVariance : public ICLKernel
+{
+public:
+ /** Constructor */
+ CLComputeMeanVariance();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLComputeMeanVariance(const CLComputeMeanVariance &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ CLComputeMeanVariance &operator=(const CLComputeMeanVariance &) = delete;
+ /** Default Move Constructor. */
+ CLComputeMeanVariance(CLComputeMeanVariance &&) = default;
+ /** Default move assignment operator */
+ CLComputeMeanVariance &operator=(CLComputeMeanVariance &&) = default;
+ /** Default destructor */
+ ~CLComputeMeanVariance() = default;
+
+ /** Set the input and output tensors.
+ *
+ * @param[in] compile_context The compile context to be used.
+ * @param[in] input Source tensor. Data types supported: F16/F32. Data layout supported: NCHW, NHWC
+ * @param[out] output Destination tensor of shape [channels, 2, batches] storing the per-channel mean and variance.
+ * Data type supported: same as @p input.
+ */
+ void configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref CLComputeMeanVariance.
+ *
+ * @param[in] input Source tensor info. Data types supported: F16/F32. Data layout supported: NHWC, NCHW
+ * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p input.
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output);
+
+ // Inherited methods overridden:
+ void run(const Window &window, cl::CommandQueue &queue) override;
+
+private:
+ ICLTensor *_input;
+ ICLTensor *_output;
+};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLINSTANCENORMALIZATIONLAYERKERNEL_H */
diff --git a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
index 9bc060e..f2406d6 100644
--- a/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
+++ b/src/runtime/CL/functions/CLInstanceNormalizationLayer.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019-2020 Arm Limited.
+ * Copyright (c) 2019-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -23,13 +23,24 @@
*/
#include "arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h"
+#include "arm_compute/core/Error.h"
#include "arm_compute/core/Types.h"
+#include "arm_compute/runtime/CL/CLHelpers.h"
+#include "arm_compute/runtime/CL/CLScheduler.h"
+#include "src/core/CL/ICLKernel.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
#include "src/core/CL/kernels/CLInstanceNormalizationLayerKernel.h"
namespace arm_compute
{
-CLInstanceNormalizationLayer::CLInstanceNormalizationLayer()
+CLInstanceNormalizationLayer::CLInstanceNormalizationLayer(CLRuntimeContext *ctx) // NOLINT
+ : _inst_norm_kernel(),
+ _mean_var_kernel(),
+ _mean_var_tensor(),
+ _ctx(ctx)
+{
+}
+CLInstanceNormalizationLayer::~CLInstanceNormalizationLayer()
{
}
@@ -40,13 +51,25 @@
void CLInstanceNormalizationLayer::configure(const CLCompileContext &compile_context, ICLTensor *input, ICLTensor *output, float gamma, float beta, float epsilon, bool use_mixed_precision)
{
- auto k = std::make_unique<CLInstanceNormalizationLayerKernel>();
- k->configure(compile_context, input, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
- _kernel = std::move(k);
+ auto w = std::make_unique<CLComputeMeanVariance>();
+ w->configure(compile_context, input, &_mean_var_tensor);
+ _mean_var_kernel = std::move(w);
+ auto k = std::make_unique<CLInstanceNormalizationLayerKernel>();
+ k->configure(compile_context, input, &_mean_var_tensor, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
+ _inst_norm_kernel = std::move(k);
+ _mean_var_tensor.allocator()->allocate();
}
Status CLInstanceNormalizationLayer::validate(const ITensorInfo *input, const ITensorInfo *output, float gamma, float beta, float epsilon, bool use_mixed_precision)
{
return CLInstanceNormalizationLayerKernel::validate(input, output, InstanceNormalizationLayerKernelInfo(gamma, beta, epsilon, use_mixed_precision));
}
-} // namespace arm_compute
\ No newline at end of file
+
+void CLInstanceNormalizationLayer::run()
+{
+ ARM_COMPUTE_ERROR_ON_MSG(!_inst_norm_kernel, "CLInstanceNormalizationLayer has not been configured");
+ schedule_kernel_on_ctx(_ctx, _mean_var_kernel.get());
+ schedule_kernel_on_ctx(_ctx, _inst_norm_kernel.get());
+}
+
+} // namespace arm_compute
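
For reviewers, a minimal host-side sketch of how the two-kernel function is
driven after this change (hypothetical standalone program; the shape mirrors
the workload quoted above):

    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/CL/CLScheduler.h"
    #include "arm_compute/runtime/CL/CLTensor.h"
    #include "arm_compute/runtime/CL/functions/CLInstanceNormalizationLayer.h"

    using namespace arm_compute;

    int main()
    {
        CLScheduler::get().default_init();

        // NHWC tensor of 32 channels and a 192x256 plane, single batch.
        TensorShape shape(32U, 192U, 256U, 1U);
        TensorInfo  info(shape, 1, DataType::F32);
        info.set_data_layout(DataLayout::NHWC);

        CLTensor src, dst;
        src.allocator()->init(info);
        dst.allocator()->init(info);

        CLInstanceNormalizationLayer norm;
        norm.configure(&src, &dst, 1.f /* gamma */, 0.f /* beta */, 1e-12f /* epsilon */);

        src.allocator()->allocate();
        dst.allocator()->allocate();

        // run() now enqueues compute_mean_var first, then instance_normalization.
        norm.run();
        CLScheduler::get().sync();
        return 0;
    }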