/*
 * Copyright (c) 2016-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEGEMMMatrixVectorMultiplyKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>
#include <tuple>

using namespace arm_compute;

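// Primary template: instantiated only for data-type combinations that have no specialization below; it reports an error at run time.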
template <typename I0, typename I1, typename O>
void NEGEMMMatrixVectorMultiplyKernel::matrix_vector_multiply(const Window &window_in, const Window &window_w, const Window &window_out)
{
    ARM_COMPUTE_ERROR("Unsupported data types");
    ARM_COMPUTE_UNUSED(window_in);
    ARM_COMPUTE_UNUSED(window_w);
    ARM_COMPUTE_UNUSED(window_out);
}

namespace arm_compute
{
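// F32 specialization: each window iteration computes the dot product of one input row with the matching weights row,
// accumulating four float lanes per step and reducing them to a single scalar at the end.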
template <>
void NEGEMMMatrixVectorMultiplyKernel::matrix_vector_multiply<float, float, float>(const Window &window_in,
                                                                                   const Window &window_w,
                                                                                   const Window &window_out)
{
    Iterator in(_input0, window_in);
    Iterator in2(_input1, window_w);
    Iterator out(_output, window_out);

    const int input_w          = _input0->info()->dimension(0);
    const int input_h          = _input0->info()->dimension(1);
    const int input_stride_x   = _input0->info()->strides_in_bytes().x();
    const int weights_stride_x = _input1->info()->strides_in_bytes().x();
    const int weights_stride_y = _input1->info()->strides_in_bytes().y();
    const int output_stride_x  = _output->info()->strides_in_bytes().x();

    execute_window_loop(window_in, [&](const Coordinates & id)
    {
        // Get pointers
        const uint8_t *const input_ptr   = in.ptr();
        const uint8_t *const weights_ptr = in2.ptr() + id.z() * weights_stride_y;
        auto output_ptr                  = reinterpret_cast<float *>(out.ptr() + (id.y() + id.z() * input_h) * output_stride_x);

        float32x4_t row_dot = vdupq_n_f32(0.f);
        for(int i = 0; i < input_w; i += 4)
        {
            const auto input   = vld1q_f32(reinterpret_cast<const float *>(input_ptr + i * input_stride_x));
            const auto weights = vld1q_f32(reinterpret_cast<const float *>(weights_ptr + i * weights_stride_x));
            row_dot            = vaddq_f32(row_dot, vmulq_f32(input, weights));
        }

        auto temp = vadd_f32(vget_high_f32(row_dot), vget_low_f32(row_dot));
        temp      = vpadd_f32(temp, temp);

        *output_ptr = vget_lane_f32(temp, 0);
    },
    in, in2, out);
}

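// QASYMM8 specialization: reads 16 bytes per step, widens input and weights to S32 while adding the negated
// quantization offsets, accumulates the dot product in S32 and writes an S32 result.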
template <>
void NEGEMMMatrixVectorMultiplyKernel::matrix_vector_multiply<uint8_t, uint8_t, int32_t>(const Window &window_in,
                                                                                         const Window &window_w,
                                                                                         const Window &window_out)
{
    Iterator in(_input0, window_in);
    Iterator in2(_input1, window_w);
    Iterator out(_output, window_out);

    const int input_offset   = -_input0->info()->quantization_info().offset;
    const int weights_offset = -_input1->info()->quantization_info().offset;

    const int input_w          = _input0->info()->dimension(0);
    const int input_h          = _input0->info()->dimension(1);
    const int input_stride_x   = _input0->info()->strides_in_bytes().x();
    const int weights_stride_x = _input1->info()->strides_in_bytes().x();
    const int weights_stride_y = _input1->info()->strides_in_bytes().y();
    const int output_stride_x  = _output->info()->strides_in_bytes().x();
    const int read_step        = 16 / _input0->info()->element_size();

    const int32x4_t v_input_offset   = vdupq_n_s32(input_offset);
    const int32x4_t v_weights_offset = vdupq_n_s32(weights_offset);

    execute_window_loop(window_in, [&](const Coordinates & id)
    {
        // Get pointers
        const uint8_t *const input_ptr   = in.ptr();
        const uint8_t *const weights_ptr = in2.ptr() + id.z() * weights_stride_y;
        auto output_ptr                  = reinterpret_cast<int32_t *>(out.ptr() + (id.y() + id.z() * input_h) * output_stride_x);

        int32x4_t row_dot = vdupq_n_s32(0);
        for(int i = 0; i < input_w; i += read_step)
        {
            // Read values
            const auto input   = vld1q_u8(reinterpret_cast<const uint8_t *>(input_ptr + i * input_stride_x));
            const auto weights = vld1q_u8(reinterpret_cast<const uint8_t *>(weights_ptr + i * weights_stride_x));

            // Add offsets
            const int32x4x4_t input_s32 =
            {
                {
                    vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(input))))),
                    vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(vget_low_u8(input))))),
                    vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_high_u8(input))))),
                    vaddw_s16(v_input_offset, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(vget_high_u8(input)))))
                }
            };
            const int32x4x4_t weights_s32 =
            {
                {
                    vaddw_s16(v_weights_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_low_u8(weights))))),
                    vaddw_s16(v_weights_offset, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(vget_low_u8(weights))))),
                    vaddw_s16(v_weights_offset, vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vget_high_u8(weights))))),
                    vaddw_s16(v_weights_offset, vreinterpret_s16_u16(vget_high_u16(vmovl_u8(vget_high_u8(weights)))))
                }
            };

            // Dot
            row_dot = vaddq_s32(row_dot, vmulq_s32(input_s32.val[0], weights_s32.val[0]));
            row_dot = vaddq_s32(row_dot, vmulq_s32(input_s32.val[1], weights_s32.val[1]));
            row_dot = vaddq_s32(row_dot, vmulq_s32(input_s32.val[2], weights_s32.val[2]));
            row_dot = vaddq_s32(row_dot, vmulq_s32(input_s32.val[3], weights_s32.val[3]));
        }

        // Reduction
        auto temp = vadd_s32(vget_high_s32(row_dot), vget_low_s32(row_dot));
        temp      = vpadd_s32(temp, temp);

        *output_ptr = vget_lane_s32(temp, 0);
    },
    in, in2, out);
}
} // namespace arm_compute

NEGEMMMatrixVectorMultiplyKernel::NEGEMMMatrixVectorMultiplyKernel()
    : _func(nullptr), _input0(nullptr), _input1(nullptr), _output(nullptr), _border_size(0)
{
}

BorderSize NEGEMMMatrixVectorMultiplyKernel::border_size() const
{
    return _border_size;
}

void NEGEMMMatrixVectorMultiplyKernel::configure(const ITensor *input0, const ITensor *input1, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8, DataType::F32);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_FIXED_POINT(input0, input1, output);
    ARM_COMPUTE_ERROR_ON(is_data_type_quantized_asymmetric(input0->info()->data_type()) && (output->info()->data_type() != DataType::S32));
    ARM_COMPUTE_ERROR_ON(input0->info()->dimension(2) != input1->info()->dimension(1));

    _input0 = input0;
    _input1 = input1;
    _output = output;

    // Set appropriate function to run
    switch(input0->info()->data_type())
    {
        case DataType::QASYMM8:
            _func = &NEGEMMMatrixVectorMultiplyKernel::matrix_vector_multiply<uint8_t, uint8_t, int32_t>;
            break;
        case DataType::F32:
            _func = &NEGEMMMatrixVectorMultiplyKernel::matrix_vector_multiply<float, float, float>;
            break;
        default:
            ARM_COMPUTE_ERROR("Unsupported data type");
    }

    // Configure kernel window
    const unsigned int num_elems_read_per_iteration = 16 / _input0->info()->element_size();

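    // The vector loads step over a row in chunks of num_elems_read_per_iteration, so the last load of a row may run
    // past its end; the excess is exposed through border_size() and covered by the padding requested below.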
    const unsigned int border_x = ceil_to_multiple(input0->info()->dimension(0), num_elems_read_per_iteration) - input0->info()->dimension(0);
    _border_size                = BorderSize(0, border_x);

    Window win = calculate_max_window(*input0->info(), Steps(num_elems_read_per_iteration));

    AccessWindowHorizontal input0_access(input0->info(), 0, num_elems_read_per_iteration);
    AccessWindowHorizontal input1_access(input1->info(), 0, num_elems_read_per_iteration);
    AccessWindowStatic     output_access(output->info(), 0, 0, output->info()->dimension(0), output->info()->dimension(1));

    update_window_and_padding(win, input0_access, input1_access, output_access);

    _output->info()->set_valid_region(ValidRegion(Coordinates(), _output->info()->tensor_shape()));

    INEKernel::configure(win);
}

void NEGEMMMatrixVectorMultiplyKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    Window window_slice = window.first_slice_window_3D();

    Window window_in(window);
    Window window_weights(window_slice);
    Window window_out(window);

    // Setup input0 slice
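    // The X step equals the full row width, so each iteration of the kernel loop processes one complete input row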
    window_in.set(Window::DimX, Window::Dimension(0, _input0->info()->dimension(0), _input0->info()->dimension(0)));
    window_in.set(Window::DimY, Window::Dimension(0, _input0->info()->dimension(1), 1));
    window_in.set(Window::DimZ, Window::Dimension(0, _input0->info()->dimension(2), 1));

    // Setup input1 and output slice. Their dimensions are increased in the kernel.
    window_weights.set(Window::DimX, Window::Dimension(0, 0, 0));
    window_weights.set(Window::DimY, Window::Dimension(0, 0, 0));
    window_weights.set(Window::DimZ, Window::Dimension(0, 0, 0));

    window_out.set(Window::DimX, Window::Dimension(0, 0, 0));
    window_out.set(Window::DimY, Window::Dimension(0, 0, 0));
    window_out.set(Window::DimZ, Window::Dimension(0, 0, 0));

    if(_func != nullptr)
    {
        (this->*_func)(window_in, window_weights, window_out);
    }
}