Port the ClGemmLowp kernels to the new API

Ported kernels:
- CLGEMMLowpMatrixMultiplyNativeKernel
- CLGEMMLowpMatrixMultiplyReshapedKernel
- CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel
- CLGEMMLowpOffsetContributionKernel
- CLGEMMLowpOffsetContributionOutputStageKernel
- CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel
- CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel
- CLGEMMLowpQuantizeDownInt32ScaleKernel
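
As shown in the CLLSTMLayerQuantized changes below, callers now describe the
fused quantize-down step with a GEMMLowpOutputStageInfo and run it through
CLGEMMLowpOutputStage, rather than through per-variant functions such as
CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint. A minimal sketch of the
new usage (tensor names src, bias and dst are illustrative, not from this patch):

    // Multiplier/shift computed as before, e.g. via
    // quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);
    GEMMLowpOutputStageInfo info{};
    info.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
    info.gemmlowp_multiplier = output_multiplier;
    info.gemmlowp_shift      = output_shift;
    info.output_data_type    = DataType::QSYMM16;

    // src: S32 GEMM result, bias: S32 bias tensor, dst: quantized output
    CLGEMMLowpOutputStage output_stage;
    output_stage.configure(&src, &bias, &dst, info);
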
Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I9d5a744d6a2dd2f2726fdfb291bad000b6970de2
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5870
Reviewed-by: Michele Di Giorgio <michele.digiorgio@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/runtime/CL/functions/CLLSTMLayerQuantized.cpp b/src/runtime/CL/functions/CLLSTMLayerQuantized.cpp
index a44dcd2..589523a 100644
--- a/src/runtime/CL/functions/CLLSTMLayerQuantized.cpp
+++ b/src/runtime/CL/functions/CLLSTMLayerQuantized.cpp
@@ -28,11 +28,6 @@
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "src/core/CL/kernels/CLFillBorderKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyNativeKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpMatrixMultiplyReshapedOnlyRHSKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpOffsetContributionOutputStageKernel.h"
-#include "src/core/CL/kernels/CLGEMMLowpReductionKernel.h"
#include "src/core/helpers/AutoConfiguration.h"
#include <memory>
@@ -179,7 +174,13 @@
quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift);

_memory_group.manage(&_output_lowp);
- _output_stage.configure(compile_context, &_output_highp, &_bias, &_output_lowp, output_multiplier, output_shift);
+
+ GEMMLowpOutputStageInfo info{};
+ info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+ info.gemmlowp_multiplier = output_multiplier;
+ info.gemmlowp_shift = output_shift;
+ info.output_data_type = DataType::QSYMM16;
+ _output_stage.configure(compile_context, &_output_highp, &_bias, &_output_lowp, info);

_output_highp.allocator()->allocate();
_bias.allocator()->allocate();
@@ -386,7 +387,12 @@
ARM_COMPUTE_RETURN_ON_ERROR(quantization::calculate_quantized_multiplier(multiplier, &output_multiplier, &output_shift));

// _output_stage
- ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint::validate(&output_highp, &bias_concatenated, &output_lowp));
+ GEMMLowpOutputStageInfo info{};
+ info.type = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
+ info.gemmlowp_multiplier = output_multiplier;
+ info.gemmlowp_shift = output_shift;
+ info.output_data_type = DataType::QSYMM16;
+ ARM_COMPUTE_RETURN_ON_ERROR(CLGEMMLowpOutputStage::validate(&output_highp, &bias_concatenated, &output_lowp, info));

TensorInfo input_gate_input;
TensorInfo forget_gate_input;