COMPMID-926 Add depth multiplier support to NEON/CL/GLES depthwise convolution

Change-Id: I03f32c62350e5ea43e77bb15fc5a832d83719e3b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/126657
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
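
For context, the depth multiplier controls how many output planes each input
plane produces: the output depth is IFM * depth_multiplier. A minimal sketch of
that arithmetic (the output_depth helper below is illustrative only, not part
of this patch):

    // Illustrative helper: a depthwise convolution produces depth_multiplier
    // filters per input channel, so the output depth is the product of the two.
    constexpr unsigned int output_depth(unsigned int input_depth, unsigned int depth_multiplier)
    {
        return input_depth * depth_multiplier;
    }
    static_assert(output_depth(16U, 2U) == 32U, "IFM = 16 with a multiplier of 2 yields 32 output channels");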
diff --git a/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h b/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
index 0c2f30a..bd9e7eb 100644
--- a/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
+++ b/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
@@ -53,23 +53,25 @@
     NEDepthwiseConvolutionLayer3x3Kernel &operator=(NEDepthwiseConvolutionLayer3x3Kernel &&) = default;
     /** Initialize the function's source, destination, conv and border_size.
      *
-     * @param[in]  input       Source tensor. DataType supported: QASYMM8, F32.
-     * @param[in]  weights     Weights tensor. This is a 3D tensor with dimensions [3, 3, IFM]. Data type supported: Same as @p input.
-     * @param[out] output      Destination tensor. Data type supported: Same as @p input.
-     * @param[in]  conv_info   Padding and stride information to use for the convolution.
-     * @param[in]  data_layout (Optional) Data layout of the input and weights tensor
+     * @param[in]  input            Source tensor. Data type supported: QASYMM8, F32.
+     * @param[in]  weights          Weights tensor. This is a 3D tensor with dimensions [3, 3, IFM]. Data type supported: Same as @p input.
+     * @param[out] output           Destination tensor. Data type supported: Same as @p input.
+     * @param[in]  conv_info        Padding and stride information to use for the convolution.
+     * @param[in]  depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
+     * @param[in]  data_layout      (Optional) Data layout of the input and weights tensors
      */
-    void configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, DataLayout data_layout = DataLayout::NCHW);
+    void configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, DataLayout data_layout = DataLayout::NCHW);
     /** Static method that checks if optimized execution is supported for the given parameters
      *
-     * @param[in] input_shape Input shape
-     * @param[in] conv_info   Padding and stride information to use for the convolution.
-     * @param[in] dt          Data type of the input and weights
-     * @param[in] data_layout (Optional) Data layout of the input and weights tensor
+     * @param[in] input_shape      Input shape
+     * @param[in] conv_info        Padding and stride information to use for the convolution.
+     * @param[in] dt               Data type of the input and weights
+     * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
+     * @param[in] data_layout      (Optional) Data layout of the input and weights tensors
      *
      * @return True if the optimized kernels can be executed else false
      */
-    static bool is_optimized_execution_possible(TensorShape input_shape, PadStrideInfo conv_info, DataType dt, DataLayout data_layout = DataLayout::NCHW);
+    static bool is_optimized_execution_possible(TensorShape input_shape, PadStrideInfo conv_info, DataType dt, unsigned int depth_multiplier = 1, DataLayout data_layout = DataLayout::NCHW);
     /** Generates the convolver object */
     void generate_convolver();
 
@@ -110,6 +112,7 @@
     std::unique_ptr<depthwise::IDepthwiseConvolution> _convolver;
     unsigned int                                      _num_elems_written_per_iteration;
     bool                                              _run_optimized;
+    unsigned int                                      _depth_multiplier;
 };
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_NEDEPTHWISECONVOLUTIONKERNEL3x3_H__ */
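
A minimal usage sketch against the new kernel signature above. The concrete
shapes, and the assumption that the weights tensor carries IFM *
depth_multiplier planes, are illustrative rather than mandated by this header:

    #include "arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void configure_dwc_3x3()
    {
        constexpr unsigned int depth_multiplier = 2;

        // 32x32 input with 16 channels; a 3x3 kernel with stride 1 and pad 1
        // keeps the spatial size, and the output has 16 * 2 = 32 channels.
        Tensor input{}, weights{}, output{};
        input.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
        weights.allocator()->init(TensorInfo(TensorShape(3U, 3U, 32U), 1, DataType::F32)); // assumed IFM * depth_multiplier planes
        output.allocator()->init(TensorInfo(TensorShape(32U, 32U, 32U), 1, DataType::F32));

        NEDepthwiseConvolutionLayer3x3Kernel kernel{};
        kernel.configure(&input, &weights, &output, PadStrideInfo(1, 1, 1, 1), depth_multiplier);

        // The static check takes the multiplier as well now; presumably a
        // multiplier other than 1 rules out the optimized path (assumption).
        const bool optimized = NEDepthwiseConvolutionLayer3x3Kernel::is_optimized_execution_possible(
            TensorShape(32U, 32U, 16U), PadStrideInfo(1, 1, 1, 1), DataType::F32, depth_multiplier);
        (void)optimized;

        // ...allocate the tensors and enqueue the kernel on a window afterwards.
    }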
diff --git a/arm_compute/core/NEON/kernels/NEDepthwiseIm2ColKernel.h b/arm_compute/core/NEON/kernels/NEDepthwiseIm2ColKernel.h
index ca10bfa..9c11cfa 100644
--- a/arm_compute/core/NEON/kernels/NEDepthwiseIm2ColKernel.h
+++ b/arm_compute/core/NEON/kernels/NEDepthwiseIm2ColKernel.h
@@ -54,15 +54,16 @@
     NEDepthwiseIm2ColKernel &operator=(NEDepthwiseIm2ColKernel &&) = default;
     /** Set the input and output of the kernel.
      *
-     * @param[in]  input       The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
-     *                         while every optional dimension from 4 and above represent a batch of inputs. Data types supported: QASYMM8, F32
-     * @param[out] output      The output tensor. First 3 lower dimensions represent a transform of each 3D input,
-     *                         while every dimension above 3 represents a batch. Data types supported: Same as @p input
-     * @param[in]  kernel_dims The kernel dimensions (width and height).
-     * @param[in]  conv_info   Contains padding and stride information described in @ref PadStrideInfo.
-     * @param[in]  has_bias    Boolean that specifies if the depthwise convolution has bias.
+     * @param[in]  input            The input tensor to convert. 3 lower dimensions represent a single input [width, height, IFM],
+     *                              while every optional dimension from 4 and above represents a batch of inputs. Data types supported: QASYMM8, F32
+     * @param[out] output           The output tensor. First 3 lower dimensions represent a transform of each 3D input,
+     *                              while every dimension above 3 represents a batch. Data types supported: Same as @p input
+     * @param[in]  kernel_dims      The kernel dimensions (width and height).
+     * @param[in]  conv_info        Contains padding and stride information described in @ref PadStrideInfo.
+     * @param[in]  has_bias         Boolean that specifies if the depthwise convolution has bias.
+     * @param[in]  depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
      */
-    void configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias = false);
+    void configure(const ITensor *input, ITensor *output, const Size2D &kernel_dims, const PadStrideInfo &conv_info, bool has_bias = false, unsigned int depth_multiplier = 1);
 
     // Inherited methods overridden:
     void run(const Window &window, const ThreadInfo &info) override;
@@ -87,6 +88,7 @@
     Size2D                     _kernel_dims;
     PadStrideInfo              _conv_info;
     bool                       _has_bias;
+    unsigned int               _depth_multiplier;
 };
 } // arm_compute
 #endif /*__ARM_COMPUTE_NEDEPTHWISEIM2COLKERNEL_H__ */
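
And a matching sketch for the im2col kernel. The output layout assumed below,
[kernel_w * kernel_h, conv_out_w * conv_out_h, IFM * depth_multiplier], is an
illustration only; this header does not spell it out:

    #include "arm_compute/core/NEON/kernels/NEDepthwiseIm2ColKernel.h"
    #include "arm_compute/core/Size2D.h"
    #include "arm_compute/core/TensorInfo.h"
    #include "arm_compute/core/Types.h"
    #include "arm_compute/runtime/Tensor.h"

    using namespace arm_compute;

    void configure_dwc_im2col()
    {
        constexpr unsigned int depth_multiplier = 2;

        Tensor input{}, output{};
        input.allocator()->init(TensorInfo(TensorShape(32U, 32U, 16U), 1, DataType::F32));
        // Assumed layout: 3 * 3 = 9 rows per patch, 32 * 32 = 1024 patches
        // (stride 1, pad 1 keeps the spatial size), 16 * 2 = 32 planes.
        output.allocator()->init(TensorInfo(TensorShape(9U, 1024U, 32U), 1, DataType::F32));

        NEDepthwiseIm2ColKernel im2col{};
        im2col.configure(&input, &output, Size2D(3U, 3U), PadStrideInfo(1, 1, 1, 1),
                         /* has_bias = */ false, depth_multiplier);
    }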