COMPMID-3829: Create CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel and remove padding from related OpenCL kernels
Change-Id: I0b0be8fcccf511c7214e83ba6aa8d0e901bc4f3c
Signed-off-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/4146
Reviewed-by: Georgios Pinitas <georgios.pinitas@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h b/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h
index c6e9588..44c52ff 100644
--- a/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h
+++ b/arm_compute/runtime/CL/functions/CLGEMMLowpOutputStage.h
@@ -61,7 +61,7 @@
*
* This function calls the following OpenCL kernels:
*
- * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
+ * -# @ref CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel
*
* @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
* after the result is shifted right by result_shift
@@ -139,7 +139,7 @@
*
* This function calls the following OpenCL kernels:
*
- * -# @ref CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel
+ * -# @ref CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel
*
* @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
* after the result is shifted right by result_shift
@@ -217,7 +217,7 @@
*
 * This function calls the following OpenCL kernels:
*
- * -# @ref CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel
+ * -# @ref CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel
*
* @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
* after the result is shifted right by result_shift
@@ -274,9 +274,7 @@
*
* -# @ref CLGEMMLowpQuantizeDownInt32ScaleKernel
* -# @ref CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel
- * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
- * -# @ref CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel
- * -# @ref CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel
+ * -# @ref CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel
*/
class CLGEMMLowpOutputStage : public ICLSimpleFunction
{
@@ -300,7 +298,7 @@
* @param[in] info GEMMLowp output stage metadata.
*/
void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo &info);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
+ /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel
*
* @param[in] input Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
* @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.