Round to nearest with ties away from zero in Relu
* This patch adds support for rounding modes in vmlaq_qasymm8_signed
which is used to compute Relu for quantized types
* Partially resolves MLCE-1018
Change-Id: I2a267b84745430e1ffe92b8bc79828a39332db18
Signed-off-by: Pablo Marquez Tello <pablo.tello@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/9354
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Reviewed-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/cpu/kernels/activation/generic/neon/qasymm8.cpp b/src/cpu/kernels/activation/generic/neon/qasymm8.cpp
index 05a0b50..f555557 100644
--- a/src/cpu/kernels/activation/generic/neon/qasymm8.cpp
+++ b/src/cpu/kernels/activation/generic/neon/qasymm8.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2022 Arm Limited.
+ * Copyright (c) 2020-2023 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
@@ -101,21 +101,21 @@
// Perform activation
tmp = vmaxq_u8(vconst_0, vin);
// Re-quantize to new output space
- tmp = vmlaq_qasymm8(tmp, vs, vo);
+ tmp = vmlaq_qasymm8<RoundingPolicy::TO_NEAREST_UP>(tmp, vs, vo);
}
else if(act == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
{
// Perform activation
tmp = vminq_u8(va, vmaxq_u8(vconst_0, vin));
// Re-quantize to new output space
- tmp = vmlaq_qasymm8(tmp, vs, vo);
+ tmp = vmlaq_qasymm8<RoundingPolicy::TO_NEAREST_UP>(tmp, vs, vo);
}
else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
{
// Perform activation
tmp = vminq_u8(va, vmaxq_u8(vb, vin));
// Re-quantize to new output space
- tmp = vmlaq_qasymm8(tmp, vs, vo);
+ tmp = vmlaq_qasymm8<RoundingPolicy::TO_NEAREST_UP>(tmp, vs, vo);
}
#ifndef __aarch64__ // LUT-based implementation is used for aarch64 instead.
else if(act == ActivationLayerInfo::ActivationFunction::LOGISTIC)
@@ -230,17 +230,17 @@
if(act == ActivationLayerInfo::ActivationFunction::RELU)
{
tmp = std::max(const_0, in);
- tmp = utility::clamp<int32_t, qasymm8_t>(tmp * s + o);
+ tmp = utility::clamp<int32_t, qasymm8_t>(support::cpp11::lround(tmp * s + o));
}
else if(act == ActivationLayerInfo::ActivationFunction::BOUNDED_RELU)
{
tmp = std::min(a, std::max(const_0, in));
- tmp = utility::clamp<int32_t, qasymm8_t>(tmp * s + o);
+ tmp = utility::clamp<int32_t, qasymm8_t>(support::cpp11::lround(tmp * s + o));
}
else if(act == ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU)
{
tmp = std::min(a, std::max(b, in));
- tmp = utility::clamp<int32_t, qasymm8_t>(tmp * s + o);
+ tmp = utility::clamp<int32_t, qasymm8_t>(support::cpp11::lround(tmp * s + o));
}
#ifndef __aarch64__ // LUT-based implementation is used for aarch64 instead.
else if(act == ActivationLayerInfo::ActivationFunction::LOGISTIC)