/*
 * Copyright (c) 2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/Traits.h"
#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
#include "src/core/helpers/WindowHelpers.h"

namespace arm_compute
{
namespace cpu
{
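// Element-wise subtraction of two QSYMM16 tensors: each operand is dequantized
// to float32 with its own scale, the difference is taken, and the result is
// requantized with the destination scale. The ConvertPolicy is unused because
// the final narrowing back to int16 always saturates.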
void sub_qsymm16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
    ARM_COMPUTE_UNUSED(policy);

    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());

    // Clear the X dimension on the execution window, as we handle it manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int  window_step_x         = 8;
    const auto window_start_x        = static_cast<int>(window.x().start());
    const auto window_end_x          = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();

    const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
    const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();

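    // Pre-broadcast the input scales and the reciprocal of the output scale so
    // the inner loops can requantize with a multiply instead of a divide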
    const float32x4_t vscale1    = vdupq_n_f32(iq1_info.scale);
    const float32x4_t vscale2    = vdupq_n_f32(iq2_info.scale);
    const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale);

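    // When one input has extent 1 in X it is broadcast: a single scalar is read
    // per row and combined with every element of the other input along X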
    if(is_broadcast_across_x)
    {
        // A window step of 0 in X identifies the broadcast input
        const bool                    is_broadcast_input_2 = input2_win.x().step() == 0;
        Window                        broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window                        non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor                *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
        const ITensor                *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
        const UniformQuantizationInfo broadcast_qinfo      = broadcast_tensor->info()->quantization_info().uniform();
        const UniformQuantizationInfo non_broadcast_qinfo  = non_broadcast_tensor->info()->quantization_info().uniform();
        // Select the scales by role rather than by input index: vscale1/vscale2
        // would be swapped whenever input1 is the broadcast input, which the
        // scalar left-over loop below already handles correctly
        const float32x4_t             vscale_bc            = vdupq_n_f32(broadcast_qinfo.scale);
        const float32x4_t             vscale_non_bc        = vdupq_n_f32(non_broadcast_qinfo.scale);

        // Clear the X dimension on the execution window, as we handle it manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(dst, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const int16_t *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<int16_t *>(output.ptr());

            const int16_t   broadcast_value     = *reinterpret_cast<const int16_t *>(broadcast_input.ptr());
            const int16x8_t broadcast_value_vec = vdupq_n_s16(broadcast_value);

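            // Dequantize the broadcast scalar once per row: widen s16 -> s32 -> f32, then apply its scale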
            const float32x4x2_t bf =
            {
                {
                    vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(broadcast_value_vec))), vscale_bc),
                    vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(broadcast_value_vec))), vscale_bc),
                }
            };
            const float bfs = static_cast<int32_t>(broadcast_value) * broadcast_qinfo.scale;

            // Compute 8 (window_step_x) elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const int16x8_t a = vld1q_s16(non_broadcast_input_ptr + x);
                // Dequantize the non-broadcast operand with its own scale
                const float32x4x2_t af =
                {
                    {
                        vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(a))), vscale_non_bc),
                        vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(a))), vscale_non_bc),
                    }
                };

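                // Requantize: multiply by 1/output_scale and convert back to s32.
                // vcvtnq_s32_f32 rounds to nearest (ties to even); the armv7
                // fallback vcvtq_s32_f32 truncates toward zero, so results may
                // differ by one LSB between the two targets.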
                const int32x4x2_t rf =
                {
                    {
#ifdef __aarch64__
                        vcvtnq_s32_f32(vmulq_f32(is_broadcast_input_2 ? vsubq_f32(bf.val[0], af.val[0]) : vsubq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtnq_s32_f32(vmulq_f32(is_broadcast_input_2 ? vsubq_f32(bf.val[1], af.val[1]) : vsubq_f32(af.val[1], bf.val[1]), invvscaleo)),
#else  //__aarch64__
                        vcvtq_s32_f32(vmulq_f32(is_broadcast_input_2 ? vsubq_f32(bf.val[0], af.val[0]) : vsubq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtq_s32_f32(vmulq_f32(is_broadcast_input_2 ? vsubq_f32(bf.val[1], af.val[1]) : vsubq_f32(af.val[1], bf.val[1]), invvscaleo)),
#endif //__aarch64__
                    }
                };

                // Saturating narrow from s32 back to s16, then store 8 results
                const int16x8_t pa = vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1]));
                vst1q_s16(output_ptr + x, pa);
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const float afs   = static_cast<int32_t>(*(non_broadcast_input_ptr + x)) * non_broadcast_qinfo.scale;
                *(output_ptr + x) = quantize_qsymm16(is_broadcast_input_2 ? (bfs - afs) : (afs - bfs), oq_info);
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear the X dimension on the execution window, as we handle it manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(src0, input1_win);
        Iterator input2(src1, input2_win);
        Iterator output(dst, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const int16_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const int16_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());

            // Compute 8 (window_step_x) elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const int16x8_t a = vld1q_s16(input1_ptr + x);
                const int16x8_t b = vld1q_s16(input2_ptr + x);

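                // Dequantize both operands to float32 with their respective scales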
                const float32x4x2_t af =
                {
                    {
                        vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(a))), vscale1),
                        vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(a))), vscale1),
                    }
                };

                const float32x4x2_t bf =
                {
                    {
                        vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(b))), vscale2),
                        vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(b))), vscale2),
                    }
                };

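                // Requantize as in the broadcast path; see the rounding note there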
                const int32x4x2_t rf =
                {
                    {
#ifdef __aarch64__
                        vcvtnq_s32_f32(vmulq_f32(vsubq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtnq_s32_f32(vmulq_f32(vsubq_f32(af.val[1], bf.val[1]), invvscaleo)),
#else  //__aarch64__
                        vcvtq_s32_f32(vmulq_f32(vsubq_f32(af.val[0], bf.val[0]), invvscaleo)),
                        vcvtq_s32_f32(vmulq_f32(vsubq_f32(af.val[1], bf.val[1]), invvscaleo)),
#endif //__aarch64__
                    }
                };

                const int16x8_t pa = vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1]));
                vst1q_s16(output_ptr + x, pa);
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const float afs   = static_cast<int32_t>(*(input1_ptr + x)) * iq1_info.scale;
                const float bfs   = static_cast<int32_t>(*(input2_ptr + x)) * iq2_info.scale;
                // Use the cached uniform output quantization info, matching the broadcast path
                *(output_ptr + x) = quantize_qsymm16((afs - bfs), oq_info);
            }
        },
        input1, input2, output);
    }
}
} // namespace cpu
} // namespace arm_compute