COMPMID-926 Add depth multiplier support to NEON/CL/GLES depthwise convolution

Change-Id: I03f32c62350e5ea43e77bb15fc5a832d83719e3b
Reviewed-on: https://eu-gerrit-1.euhpc.arm.com/126657
Tested-by: Jenkins <bsgcomp@arm.com>
Reviewed-by: Michele DiGiorgio <michele.digiorgio@arm.com>
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
diff --git a/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h b/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
index 0c2f30a..bd9e7eb 100644
--- a/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
+++ b/arm_compute/core/NEON/kernels/NEDepthwiseConvolutionLayer3x3Kernel.h
@@ -53,23 +53,25 @@
     NEDepthwiseConvolutionLayer3x3Kernel &operator=(NEDepthwiseConvolutionLayer3x3Kernel &&) = default;
     /** Initialize the function's source, destination, conv and border_size.
      *
-     * @param[in]  input       Source tensor. DataType supported: QASYMM8, F32.
-     * @param[in]  weights     Weights tensor. This is a 3D tensor with dimensions [3, 3, IFM]. Data type supported: Same as @p input.
-     * @param[out] output      Destination tensor. Data type supported: Same as @p input.
-     * @param[in]  conv_info   Padding and stride information to use for the convolution.
-     * @param[in]  data_layout (Optional) Data layout of the input and weights tensor
+     * @param[in]  input            Source tensor. DataType supported: QASYMM8, F32.
+     * @param[in]  weights          Weights tensor. This is a 3D tensor with dimensions [3, 3, IFM]. Data type supported: Same as @p input.
+     * @param[out] output           Destination tensor. Data type supported: Same as @p input.
+     * @param[in]  conv_info        Padding and stride information to use for the convolution.
+     * @param[in]  depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
+     * @param[in]  data_layout      (Optional) Data layout of the input and weights tensor
      */
-    void configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, DataLayout data_layout = DataLayout::NCHW);
+    void configure(const ITensor *input, const ITensor *weights, ITensor *output, const PadStrideInfo &conv_info, unsigned int depth_multiplier = 1, DataLayout data_layout = DataLayout::NCHW);
     /** Static method that checks if optimized execution is supported for the given parameters
      *
-     * @param[in] input_shape Input shape
-     * @param[in] conv_info   Padding and stride information to use for the convolution.
-     * @param[in] dt          Data type of the input and weights
-     * @param[in] data_layout (Optional) Data layout of the input and weights tensor
+     * @param[in] input_shape      Input shape
+     * @param[in] conv_info        Padding and stride information to use for the convolution.
+     * @param[in] dt               Data type of the input and weights
+     * @param[in] depth_multiplier (Optional) Multiplier to apply to the input's depth in order to retrieve the output's depth. Defaults to 1.
+     * @param[in] data_layout      (Optional) Data layout of the input and weights tensor
      *
      * @return True if the optimized kernels can be executed else false
      */
-    static bool is_optimized_execution_possible(TensorShape input_shape, PadStrideInfo conv_info, DataType dt, DataLayout data_layout = DataLayout::NCHW);
+    static bool is_optimized_execution_possible(TensorShape input_shape, PadStrideInfo conv_info, DataType dt, unsigned int depth_multiplier = 1, DataLayout data_layout = DataLayout::NCHW);
     /** Generates the convolver object */
     void generate_convolver();
 
@@ -110,6 +112,7 @@
     std::unique_ptr<depthwise::IDepthwiseConvolution> _convolver;
     unsigned int                                      _num_elems_written_per_iteration;
     bool                                              _run_optimized;
+    unsigned int                                      _depth_multiplier;
 };
 } // namespace arm_compute
 #endif /* __ARM_COMPUTE_NEDEPTHWISECONVOLUTIONKERNEL3x3_H__ */