/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/NEON/kernels/NEGEMMLowpMatrixMultiplyKernel.h"

#include "arm_compute/core/AccessWindowStatic.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"

#include <arm_neon.h>
#include <cstddef>
#include <cstdint>
#include <tuple>

using namespace arm_compute;

namespace arm_compute
{
class Coordinates;
} // namespace arm_compute

NEGEMMLowpMatrixMultiplyKernel::NEGEMMLowpMatrixMultiplyKernel()
    : _input0(nullptr), _input1(nullptr), _output(nullptr), _slide_matrix_b(true)
{
}

void NEGEMMLowpMatrixMultiplyKernel::configure(const ITensor *input0, const ITensor *input1, ITensor *output)
{
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input0, 1, DataType::QASYMM8);
    ARM_COMPUTE_ERROR_ON_MISMATCHING_DATA_TYPES(input0, input1);
    ARM_COMPUTE_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::S32);

    // Check if matrix B should be slid or not
    // Don't slide matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
    // This scenario can happen when the matrix multiplication is used to perform a convolution operation
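    // For example, when GEMM implements a convolution, the weights matrix B is typically 2D while matrix A
    // carries a batch dimension, so the same matrix B is reused for every batch of A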
    TensorShape in0_shape = input0->info()->tensor_shape();
    TensorShape in1_shape = input1->info()->tensor_shape();
    TensorShape out_shape = output->info()->tensor_shape();

    in0_shape.collapse(2);
    in1_shape.collapse(2);
    out_shape.collapse(2);

    ARM_COMPUTE_ERROR_ON_MSG(in0_shape[2] != out_shape[2], "Output tensor must have the same number of batches as the input0 tensor");
    ARM_COMPUTE_ERROR_ON_MSG(in1_shape[2] != 1 && in0_shape[2] != in1_shape[2], "Input1 tensor must have the same number of batches as input0 or the number of batches must be set to 1");

    _input0         = input0;
    _input1         = input1;
    _output         = output;
    _slide_matrix_b = in1_shape[2] != 1;

    constexpr unsigned int num_elems_processed_per_iteration_x = 16;
    constexpr unsigned int num_elems_processed_per_iteration_y = 4;
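    // Each kernel iteration computes a 16 (x) by 4 (y) tile of the S32 output, matching the 4x16 block
    // accumulated by the NEON code in run()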

    Window win = calculate_max_window(*output->info(), Steps(num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y));

    AccessWindowStatic     in0_access(input0->info(), 0, 0, ceil_to_multiple(input0->info()->dimension(0), 8), input0->info()->dimension(1));
    AccessWindowHorizontal in1_access(input1->info(), 0, num_elems_processed_per_iteration_x);
    AccessWindowRectangle  output_access(output->info(), 0, 0, num_elems_processed_per_iteration_x, num_elems_processed_per_iteration_y);
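    // Declared accesses: whole rows of the reshaped A padded up to a multiple of 8 elements (presumably to
    // cover the 8-byte vld1_u8 loads in run()), 16 elements at a time from the reshaped B, and a 16x4 output tile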

    update_window_and_padding(win, in0_access, in1_access, output_access);

    output_access.set_valid_region(win, ValidRegion(Coordinates(0, 0), output->info()->tensor_shape()));
    INEKernel::configure(win);
}

void NEGEMMLowpMatrixMultiplyKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    const size_t in_b_stride = _input1->info()->strides_in_bytes()[1];
    const size_t out_stride  = _output->info()->strides_in_bytes()[1] / _output->info()->element_size();
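    // Note: in_b_stride is kept in bytes (it is used as a window step below), while out_stride is converted
    // to int32 elements so it can be used directly as a pointer offset when storing the output tile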

    // Set step_x and step_y for matrix A. Scale the Y range by a factor of 4, as the interleaved matrix A has 4 times fewer rows than the output matrix
    Window win_a(window);
    win_a.set(Window::DimX, Window::Dimension(0, 0, 0));
    win_a.set(Window::DimY, Window::Dimension(window.y().start() / 4, window.y().end() / 4, 1));

    // Set step_x and step_y for matrix B. Scale the X range by a factor of 16, as each block of 16 output columns maps to a single row of the transposed matrix B
    Window win_b;
    // Don't slice matrix B along the z dimension if matrix B has just 2 dimensions and matrix A more than 2
    // This scenario can happen when the matrix multiplication is used to perform a convolution operation
    if(_slide_matrix_b)
    {
        win_b = window;
    }
    win_b.set(Window::DimX, Window::Dimension(window.x().start() / 16, window.x().end() / 16, in_b_stride));
    win_b.set(Window::DimY, Window::Dimension(0, 0, 0));
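    // The X step for matrix B is in_b_stride bytes, so each step along X advances the B iterator by one full
    // row of the reshaped B, i.e. to the next group of 16 output columns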

    // The step_x and step_y for the output matrix have already been set in configure()
    Iterator ina(_input0, win_a);
    Iterator inb(_input1, win_b);
    Iterator out(_output, window);

    const int width_b = _input1->info()->dimension(0);

    // The implementation assumes that matrix A and matrix B have been reshaped respectively with NEGEMMInterleave4x4 and NEGEMMTranspose1xW
    // The reshaping of the matrices gives a cache-friendly implementation and avoids the data re-arrangements needed for computing 16x4 elements per iteration
    // All the values needed for computing a single 4x4 block will be read from consecutive memory positions
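    // In that layout, each group of 4 consecutive bytes of the reshaped A holds one value from each of 4
    // consecutive rows of the original A, and each row of the reshaped B concatenates, for every accumulation
    // step, the 16 values of one 1x16 block of the original B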
    execute_window_loop(window, [&](const Coordinates & id)
    {
        const uint8_t *mtx_a0 = ina.ptr();
        const uint8_t *mtx_b0 = inb.ptr();

        // Note: Since the inputs are all non-negative, we can use uint32_t accumulators
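        // QASYMM8 values are unsigned 8-bit, so each u16 x u16 product fits comfortably in a u32 lane of the
        // widening multiply-accumulate used below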
        // Accumulators for the block 0
        uint32x4x4_t c0 =
        {
            {
                vdupq_n_u32(0),
                vdupq_n_u32(0),
                vdupq_n_u32(0),
                vdupq_n_u32(0)
            }
        };

        // Accumulators for the block 1
        uint32x4x4_t c1 =
        {
            {
                vdupq_n_u32(0),
                vdupq_n_u32(0),
                vdupq_n_u32(0),
                vdupq_n_u32(0)
            }
        };

        // Accumulators for the block 2
        uint32x4x4_t c2 =
        {
            {
                vdupq_n_u32(0),
                vdupq_n_u32(0),
                vdupq_n_u32(0),
                vdupq_n_u32(0)
            }
        };

        // Accumulators for the block 3
        uint32x4x4_t c3 =
        {
            {
                vdupq_n_u32(0),
                vdupq_n_u32(0),
                vdupq_n_u32(0),
                vdupq_n_u32(0)
            }
        };

        for(int k = 0; k < width_b; k += 16, mtx_a0 += 4, mtx_b0 += 16)
        {
            const uint8x8_t  a00_u8 = vld1_u8(mtx_a0);
            const uint8x16_t b00_u8 = vld1q_u8(mtx_b0);

            // Convert a00_u8 to uint16_t and get the lower part
            const uint16x4_t a00_u16 = vget_low_u16(vmovl_u8(a00_u8));

            // Convert b00_u8 to uint16_t
            const uint16x4x4_t b00_u16 =
            {
                {
                    vget_low_u16(vmovl_u8(vget_low_u8(b00_u8))),
                    vget_high_u16(vmovl_u8(vget_low_u8(b00_u8))),
                    vget_low_u16(vmovl_u8(vget_high_u8(b00_u8))),
                    vget_high_u16(vmovl_u8(vget_high_u8(b00_u8)))
                }
            };

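            // vmlal_lane_u16 widens u16 * u16 to u32: each block ci accumulates the 16 B values multiplied by
            // lane i of a00_u16, i.e. the contribution of this accumulation step to output row i of the tile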
            // 4x4 block 0
            c0.val[0] = vmlal_lane_u16(c0.val[0], b00_u16.val[0], a00_u16, 0);
            c0.val[1] = vmlal_lane_u16(c0.val[1], b00_u16.val[1], a00_u16, 0);
            c0.val[2] = vmlal_lane_u16(c0.val[2], b00_u16.val[2], a00_u16, 0);
            c0.val[3] = vmlal_lane_u16(c0.val[3], b00_u16.val[3], a00_u16, 0);

            // 4x4 block 1
            c1.val[0] = vmlal_lane_u16(c1.val[0], b00_u16.val[0], a00_u16, 1);
            c1.val[1] = vmlal_lane_u16(c1.val[1], b00_u16.val[1], a00_u16, 1);
            c1.val[2] = vmlal_lane_u16(c1.val[2], b00_u16.val[2], a00_u16, 1);
            c1.val[3] = vmlal_lane_u16(c1.val[3], b00_u16.val[3], a00_u16, 1);

            // 4x4 block 2
            c2.val[0] = vmlal_lane_u16(c2.val[0], b00_u16.val[0], a00_u16, 2);
            c2.val[1] = vmlal_lane_u16(c2.val[1], b00_u16.val[1], a00_u16, 2);
            c2.val[2] = vmlal_lane_u16(c2.val[2], b00_u16.val[2], a00_u16, 2);
            c2.val[3] = vmlal_lane_u16(c2.val[3], b00_u16.val[3], a00_u16, 2);

            // 4x4 block 3
            c3.val[0] = vmlal_lane_u16(c3.val[0], b00_u16.val[0], a00_u16, 3);
            c3.val[1] = vmlal_lane_u16(c3.val[1], b00_u16.val[1], a00_u16, 3);
            c3.val[2] = vmlal_lane_u16(c3.val[2], b00_u16.val[2], a00_u16, 3);
            c3.val[3] = vmlal_lane_u16(c3.val[3], b00_u16.val[3], a00_u16, 3);
        }

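        // Store the 16x4 tile: out_stride is in int32 elements, so mtx_out + i * out_stride addresses output
        // row i of the tile; the unsigned accumulators are stored bit-for-bit into the S32 output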
        auto mtx_out = reinterpret_cast<int32_t *>(out.ptr());
        vst1q_s32(mtx_out + 0 * out_stride + 0, vreinterpretq_s32_u32(c0.val[0]));
        vst1q_s32(mtx_out + 0 * out_stride + 4, vreinterpretq_s32_u32(c0.val[1]));
        vst1q_s32(mtx_out + 0 * out_stride + 8, vreinterpretq_s32_u32(c0.val[2]));
        vst1q_s32(mtx_out + 0 * out_stride + 12, vreinterpretq_s32_u32(c0.val[3]));
        vst1q_s32(mtx_out + 1 * out_stride + 0, vreinterpretq_s32_u32(c1.val[0]));
        vst1q_s32(mtx_out + 1 * out_stride + 4, vreinterpretq_s32_u32(c1.val[1]));
        vst1q_s32(mtx_out + 1 * out_stride + 8, vreinterpretq_s32_u32(c1.val[2]));
        vst1q_s32(mtx_out + 1 * out_stride + 12, vreinterpretq_s32_u32(c1.val[3]));
        vst1q_s32(mtx_out + 2 * out_stride + 0, vreinterpretq_s32_u32(c2.val[0]));
        vst1q_s32(mtx_out + 2 * out_stride + 4, vreinterpretq_s32_u32(c2.val[1]));
        vst1q_s32(mtx_out + 2 * out_stride + 8, vreinterpretq_s32_u32(c2.val[2]));
        vst1q_s32(mtx_out + 2 * out_stride + 12, vreinterpretq_s32_u32(c2.val[3]));
        vst1q_s32(mtx_out + 3 * out_stride + 0, vreinterpretq_s32_u32(c3.val[0]));
        vst1q_s32(mtx_out + 3 * out_stride + 4, vreinterpretq_s32_u32(c3.val[1]));
        vst1q_s32(mtx_out + 3 * out_stride + 8, vreinterpretq_s32_u32(c3.val[2]));
        vst1q_s32(mtx_out + 3 * out_stride + 12, vreinterpretq_s32_u32(c3.val[3]));
    },
    ina, inb, out);
}