Optimize CPU add layer on quantized data

* Use fixed-point arithmetic where possible (see the scalar sketch below).
* Various optimizations for the FP32-based implementation.
  This implementation is kept as the fallback solution
  for unrealistic quantization parameters that exceed
  the range of the fixed-point solution.
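
A minimal scalar sketch of the fixed-point scheme (the helper name and the
scalar form are hypothetical and only illustrate the arithmetic; the kernel
below works on NEON vectors): the per-input scale factors are stored as
signed Q6.10 values, the offset and the accumulator as signed Q22.10, and
the result is rounded, rescaled and saturated back to 8 bits.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // out = saturate(round(in0 * scale0 + in1 * scale1 + offset))
    int8_t add_q8_fixedpoint_scalar(int8_t in0, int8_t in1,
                                    float scale0, float scale1, float offset)
    {
        // Convert the scales to signed Q6.10 and the offset to signed Q22.10.
        const auto scale0_6p10  = static_cast<int16_t>(std::lround(scale0 * 1024.f));
        const auto scale1_6p10  = static_cast<int16_t>(std::lround(scale1 * 1024.f));
        const auto offset_22p10 = static_cast<int32_t>(std::lround(offset * 1024.f));

        // 8-bit input * Q6.10 scale accumulates into a signed Q22.10 value,
        // which fits in 32 bits for the parameter range accepted by
        // add_q8_neon_fixedpoint_possible().
        const int32_t acc_22p10 = int32_t(in0) * scale0_6p10
                                + int32_t(in1) * scale1_6p10
                                + offset_22p10;

        // Round to nearest, drop the 10 fractional bits and saturate to int8.
        const int32_t out = (acc_22p10 + 512) >> 10;
        return static_cast<int8_t>(std::clamp(out, -128, 127));
    }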

Resolves: COMPMID-5458
Signed-off-by: Viet-Hoa Do <viet-hoa.do@arm.com>
Change-Id: I221d2d3801ecaae4fe0b7cf6ae8ef00ca3743665
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/8317
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Gunes Bayir <gunes.bayir@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>
Benchmark: Arm Jenkins <bsgcomp@arm.com>
diff --git a/src/cpu/kernels/add/generic/neon/impl.cpp b/src/cpu/kernels/add/generic/neon/impl.cpp
index 67985c9..0f7b31c 100644
--- a/src/cpu/kernels/add/generic/neon/impl.cpp
+++ b/src/cpu/kernels/add/generic/neon/impl.cpp
@@ -157,6 +157,223 @@
     }
 }
 
+bool add_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst)
+{
+    const auto iq0 = src0->quantization_info().uniform();
+    const auto iq1 = src1->quantization_info().uniform();
+    const auto oq = dst->quantization_info().uniform();
+
+    const auto scale0 = iq0.scale / oq.scale;
+    const auto scale1 = iq1.scale / oq.scale;
+
+    if(scale0 < -31.f || scale0 > 31.f || scale1 < -31.f || scale1 > 31.f)
+    {
+        // The scale factor cannot be stored as a 6.10 signed fixed-point number.
+        return false;
+    }
+
+    const auto offset = float(oq.offset) - scale0 * float(iq0.offset) - scale1 * float(iq1.offset);
+    const auto max_acc = (std::abs(scale0) + std::abs(scale1)) * 1024.f + std::abs(offset);
+
+    if(max_acc > 2097151.f)  // 2^21 - 1
+    {
+        // It might not be possible to store the result as a 22.10 signed fixed-point number.
+        return false;
+    }
+
+    return true;
+}
+
+template <typename ScalarType>
+void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
+{
+    ARM_COMPUTE_UNUSED(policy);
+
+    const auto in0_info = src0->info();
+    const auto in1_info = src1->info();
+
+    const auto &in0_shape = in0_info->tensor_shape();
+    const auto &in1_shape = in1_info->tensor_shape();
+
+    // Create input windows.
+    Window in0_win = window.broadcast_if_dimension_le_one(in0_shape);
+    Window in1_win = window.broadcast_if_dimension_le_one(in1_shape);
+
+    // Clear the x dimension on the execution window as we process the whole row each iteration.
+    Window win = window;
+    win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+    constexpr int window_step_x = 16;
+    const auto window_start_x = window.x().start();
+    const auto window_end_x = window.x().end();
+    const auto is_broadcast_across_x = in0_shape.x() != in1_shape.x();
+
+    const auto iq0_info = in0_info->quantization_info().uniform();
+    const auto iq1_info = in1_info->quantization_info().uniform();
+    const auto oq_info = dst->info()->quantization_info().uniform();
+
+    const auto in0_scale = iq0_info.scale / oq_info.scale;
+    const auto in1_scale = iq1_info.scale / oq_info.scale;
+    const auto offset = float(oq_info.offset) - in0_scale * float(iq0_info.offset) - in1_scale * float(iq1_info.offset);
+
+    const auto in0_scale_6p10 = static_cast<int16_t>(support::cpp11::lround(in0_scale * 1024.f));
+    const auto in1_scale_6p10 = static_cast<int16_t>(support::cpp11::lround(in1_scale * 1024.f));
+    const auto offset_22p10 = static_cast<int32_t>(support::cpp11::lround(offset * 1024.f));
+
+    if(is_broadcast_across_x)
+    {
+        // Prefix: a = non-broadcast, b = broadcast.
+
+        const auto is_broadcast_input_1 = in1_win.x().step() == 0;
+        auto a_win = is_broadcast_input_1 ? in0_win : in1_win;
+        auto b_win = is_broadcast_input_1 ? in1_win : in0_win;
+        const auto a_tensor = is_broadcast_input_1 ? src0 : src1;
+        const auto b_tensor = is_broadcast_input_1 ? src1 : src0;
+
+        const auto a_scale_6p10 = is_broadcast_input_1 ? in0_scale_6p10 : in1_scale_6p10;
+        const auto b_scale = is_broadcast_input_1 ? in1_scale : in0_scale;
+        const auto a_vscale_6p10 = wrapper::vdup_n(a_scale_6p10, wrapper::traits::vector_64_tag());
+
+        // Clear the x dimension on the execution window as we process the whole row each iteration.
+        a_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+        Iterator a_input_it(a_tensor, a_win);
+        Iterator b_input_it(b_tensor, b_win);
+        Iterator out_it(dst, win);
+
+        execute_window_loop(win, [&](const Coordinates &)
+        {
+            const auto a_ptr = reinterpret_cast<const ScalarType *>(a_input_it.ptr());
+            const auto b_ptr = reinterpret_cast<const ScalarType *>(b_input_it.ptr());
+            const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
+
+            const auto b_val = *b_ptr;
+            const auto b_scaled_22p10 = static_cast<int32_t>(support::cpp11::lround(b_scale * b_val * 1024.f));
+            const auto b_vscaled_offseted_22p10 = wrapper::vdup_n(b_scaled_22p10 + offset_22p10, wrapper::traits::vector_128_tag());
+
+            int x = window_start_x;
+
+            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+            {
+                // Load the input.
+                const auto a_vin_8p0 = wrapper::vloadq(a_ptr + x);
+
+                // Widen the non-broadcast elements to signed 16-bit regardless of the input signedness.
+                const auto a_vin_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(a_vin_8p0)));
+                const auto a_vin_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(a_vin_8p0)));
+
+                // Multiply the non-broadcast elements by the scale factor, add the scaled broadcast elements and the offset.
+                // Widen and store the result in 32-bit integer.
+                const auto vout_22p10_00 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgetlow(a_vin_16p0_0), a_vscale_6p10);
+                const auto vout_22p10_01 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgethigh(a_vin_16p0_0), a_vscale_6p10);
+                const auto vout_22p10_10 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgetlow(a_vin_16p0_1), a_vscale_6p10);
+                const auto vout_22p10_11 = wrapper::vmlal(b_vscaled_offseted_22p10, wrapper::vgethigh(a_vin_16p0_1), a_vscale_6p10);
+
+                // Remove 2 bits of the fractional part, round, narrow to 16-bit and saturate the result.
+                const auto vout_8p8_0 = wrapper::vcombine(
+                    wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_00),
+                    wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_01)
+                );
+                const auto vout_8p8_1 = wrapper::vcombine(
+                    wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_10),
+                    wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_11)
+                );
+
+                // Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
+                const auto vout_8p0 = wrapper::vcombine(
+                    wrapper::vqrshrn<8>(vout_8p8_0),
+                    wrapper::vqrshrn<8>(vout_8p8_1)
+                );
+
+                // Store the result.
+                wrapper::vstore(out_ptr + x, vout_8p0);
+            }
+
+            // Process the left-over elements.
+            for(; x < window_end_x; ++x)
+            {
+                out_ptr[x] = utility::clamp<int32_t, ScalarType>((int32_t(a_ptr[x]) * a_scale_6p10 + b_scaled_22p10 + offset_22p10) >> 10);
+            }
+        },
+        b_input_it, a_input_it, out_it);
+    }
+    else
+    {
+        const auto vscale0_6p10 = wrapper::vdup_n(in0_scale_6p10, wrapper::traits::vector_64_tag());
+        const auto vscale1_6p10 = wrapper::vdup_n(in1_scale_6p10, wrapper::traits::vector_64_tag());
+        const auto voffset_22p10 = wrapper::vdup_n(offset_22p10, wrapper::traits::vector_128_tag());
+
+        // Clear the x dimension on the execution window as we process the whole row each iteration.
+        in0_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+        in1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
+
+        Iterator in0_it(src0, in0_win);
+        Iterator in1_it(src1, in1_win);
+        Iterator out_it(dst, win);
+
+        execute_window_loop(win, [&](const Coordinates &)
+        {
+            const auto in0_ptr = reinterpret_cast<const ScalarType *>(in0_it.ptr());
+            const auto in1_ptr = reinterpret_cast<const ScalarType *>(in1_it.ptr());
+            const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());
+
+            int x = window_start_x;
+
+            for(; x <= (window_end_x - window_step_x); x += window_step_x)
+            {
+                // Load the inputs.
+                const auto vin0_8p0 = wrapper::vloadq(in0_ptr + x);
+                const auto vin1_8p0 = wrapper::vloadq(in1_ptr + x);
+
+                // Widen the input elements to signed 16-bit regardless of the input signedness.
+                const auto vin0_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin0_8p0)));
+                const auto vin0_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin0_8p0)));
+                const auto vin1_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin1_8p0)));
+                const auto vin1_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin1_8p0)));
+
+                // Multiply the input elements by the scale factor and add the offset.
+                // Widen and store the result in 32-bit integer.
+                const auto vscaled0_offseted_22p10_00 = wrapper::vmlal(voffset_22p10, wrapper::vgetlow(vin0_16p0_0), vscale0_6p10);
+                const auto vscaled0_offseted_22p10_01 = wrapper::vmlal(voffset_22p10, wrapper::vgethigh(vin0_16p0_0), vscale0_6p10);
+                const auto vscaled0_offseted_22p10_10 = wrapper::vmlal(voffset_22p10, wrapper::vgetlow(vin0_16p0_1), vscale0_6p10);
+                const auto vscaled0_offseted_22p10_11 = wrapper::vmlal(voffset_22p10, wrapper::vgethigh(vin0_16p0_1), vscale0_6p10);
+
+                const auto vout_22p10_00 = wrapper::vmlal(vscaled0_offseted_22p10_00, wrapper::vgetlow(vin1_16p0_0), vscale1_6p10);
+                const auto vout_22p10_01 = wrapper::vmlal(vscaled0_offseted_22p10_01, wrapper::vgethigh(vin1_16p0_0), vscale1_6p10);
+                const auto vout_22p10_10 = wrapper::vmlal(vscaled0_offseted_22p10_10, wrapper::vgetlow(vin1_16p0_1), vscale1_6p10);
+                const auto vout_22p10_11 = wrapper::vmlal(vscaled0_offseted_22p10_11, wrapper::vgethigh(vin1_16p0_1), vscale1_6p10);
+
+                // Remove 2 bits of the fractional part, round, narrow to 16-bit and saturate the result.
+                const auto vout_8p8_0 = wrapper::vcombine(
+                    wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_00),
+                    wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_01)
+                );
+                const auto vout_8p8_1 = wrapper::vcombine(
+                    wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_10),
+                    wrapper::vqrshrn_ex<2, ScalarType>(vout_22p10_11)
+                );
+
+                // Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
+                const auto vout_8p0 = wrapper::vcombine(
+                    wrapper::vqrshrn<8>(vout_8p8_0),
+                    wrapper::vqrshrn<8>(vout_8p8_1)
+                );
+
+                // Store the result.
+                wrapper::vstore(out_ptr + x, vout_8p0);
+            }
+
+            // Process the left-over elements.
+            for(; x < window_end_x; ++x)
+            {
+                out_ptr[x] = utility::clamp<int32_t, ScalarType>(
+                    (int32_t(in0_ptr[x]) * in0_scale_6p10 + int32_t(in1_ptr[x]) * in1_scale_6p10 + offset_22p10) >> 10);
+            }
+        },
+        in0_it, in1_it, out_it);
+    }
+}
+
 template void add_same_neon<float>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
 template void add_same_neon<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
 template void add_same_neon<int32_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
@@ -175,5 +392,8 @@
 template void add_same_neon_as_1d_array<float16_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
 #endif /* (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */
 
+template void add_q8_neon_fixedpoint<int8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+template void add_q8_neon_fixedpoint<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
+
 } // namespace cpu
 } // namespace arm_compute