blob: 97d05327e7a4d0712f97cdc6b90ef9d135aa0856 [file] [log] [blame]
Pablo Tello299025a2017-09-29 11:30:12 +01001/*
Gian Marco Iodicebc415af2019-06-13 15:58:32 +01002 * Copyright (c) 2017-2019 ARM Limited.
Pablo Tello299025a2017-09-29 11:30:12 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Gian Marcoe75a02b2017-11-08 12:24:09 +000024#include "GEMMLowp.h"
Pablo Tello299025a2017-09-29 11:30:12 +010025
26#include "arm_compute/core/Types.h"
Georgios Pinitas5a7e7762017-12-01 16:27:29 +000027#include "tests/validation/reference/UtilsQuantizedAsymm.h"
Gian Marco58c57942017-11-28 09:10:03 +000028
29#include <limits>
Pablo Tello299025a2017-09-29 11:30:12 +010030
31namespace arm_compute
32{
33namespace test
34{
35namespace validation
36{
37namespace reference
38{
Gian Marco6b77e912017-11-17 09:27:57 +000039namespace
40{
41template <typename T>
42void quantize_down_int32_to_uint8_scale(const SimpleTensor<T> *in, const SimpleTensor<T> *bias, SimpleTensor<uint8_t> *dst, int32_t result_offset, int32_t result_mult_int, int32_t result_shift,
43 int32_t min, int32_t max)
44{
45 const int cols_in = in->shape().x();
46
47 for(int i = 0; i < in->num_elements(); ++i)
48 {
Gian Marco58c57942017-11-28 09:10:03 +000049 int32_t result = ((*in)[i] + result_offset);
Gian Marco6b77e912017-11-17 09:27:57 +000050
51 if(bias != nullptr)
52 {
53 result += (*bias)[i % cols_in];
54 }
55
Gian Marco58c57942017-11-28 09:10:03 +000056 result *= result_mult_int;
57
Gian Marco6b77e912017-11-17 09:27:57 +000058 result >>= result_shift;
59
60 // Bounded ReLu
61 if(min != max)
62 {
63 result = std::max(min, std::min(max, result));
64 }
65
66 (*dst)[i] = static_cast<uint8_t>(std::max(0, std::min(255, result)));
67 }
68}
Gian Marco58c57942017-11-28 09:10:03 +000069
70template <typename T>
71void quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> *in, const SimpleTensor<T> *bias, SimpleTensor<uint8_t> *dst, int32_t result_fixedpoint_multiplier, int32_t result_shift,
72 int32_t result_offset_after_shift, int32_t min, int32_t max)
73{
74 const int cols_in = in->shape().x();
75
76 for(int i = 0; i < in->num_elements(); ++i)
77 {
78 int32_t result = (*in)[i];
79
80 if(bias != nullptr)
81 {
82 result += (*bias)[i % cols_in];
83 }
84
85 // Fixed point multiplication
86 result = asymm_rounding_divide_by_pow2(asymm_int_mult(result, result_fixedpoint_multiplier), result_shift);
87 result += result_offset_after_shift;
88
89 // Bounded ReLu
90 if(min != max)
91 {
92 result = std::max(min, std::min(max, result));
93 }
94
95 (*dst)[i] = static_cast<uint8_t>(std::max(0, std::min(255, result)));
96 }
97}
Gian Marco Iodicebc415af2019-06-13 15:58:32 +010098
99template <typename T>
100void quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> *in, const SimpleTensor<T> *bias, SimpleTensor<int16_t> *dst, int32_t result_fixedpoint_multiplier, int32_t result_shift,
101 int32_t min, int32_t max)
102{
103 const int cols_in = in->shape().x();
104
105 for(int i = 0; i < in->num_elements(); ++i)
106 {
107 int32_t result = (*in)[i];
108
109 if(bias != nullptr)
110 {
111 result += (*bias)[i % cols_in];
112 }
113
114 // Fixed point multiplication
115 result = asymm_rounding_divide_by_pow2(asymm_int_mult(result, result_fixedpoint_multiplier), result_shift);
116
117 // Bounded ReLu
118 if(min != max)
119 {
120 result = std::max(min, std::min(max, result));
121 }
122
123 (*dst)[i] = static_cast<int16_t>(std::max(-32768, std::min(32767, result)));
124 }
125}
Gian Marco6b77e912017-11-17 09:27:57 +0000126} // namespace
127
Michalis Spyrouf3dfa272017-11-21 17:52:12 +0000128template <typename T_out, typename T_in>
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100129SimpleTensor<T_out> gemmlowp_matrix_multiply_core(const SimpleTensor<T_in> &a, const SimpleTensor<T_in> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset)
Pablo Tello299025a2017-09-29 11:30:12 +0100130{
Michalis Spyrouf3dfa272017-11-21 17:52:12 +0000131 static_assert(std::is_same<typename std::decay<T_out>::type, int32_t>::value, "Only int32_t is allowed for the output");
Gian Marcoe75a02b2017-11-08 12:24:09 +0000132
Michalis Spyrouf3dfa272017-11-21 17:52:12 +0000133 DataType dt = std::is_same<T_out, int32_t>::value ? DataType::S32 : DataType::U32;
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100134 SimpleTensor<T_out> c(shape_c, dt);
Gian Marcoe75a02b2017-11-08 12:24:09 +0000135
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100136 const int K = a.shape().x();
137 const int M = a.shape().y();
138 const int N = b.shape().x();
139 const int D = a.shape().z(); // Number of matrices in a batch
140
141 const int a_stride_z = K * M;
142 // Do not slide the matrix B along the 3rd dimension in case matrix B has less than 3 dimensions
143 const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0;
144 const int c_stride_z = N * M;
Gian Marcoe75a02b2017-11-08 12:24:09 +0000145
Michalis Spyrouf3dfa272017-11-21 17:52:12 +0000146 std::vector<T_out> acc;
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100147 acc.resize(N);
Gian Marcoe75a02b2017-11-08 12:24:09 +0000148
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100149 for(int depth = 0; depth < D; ++depth)
Pablo Tello299025a2017-09-29 11:30:12 +0100150 {
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100151 const int base_addr_a = depth * a_stride_z;
152 const int base_addr_b = depth * b_stride_z;
153 const int base_addr_c = depth * c_stride_z;
154
155 for(int i = 0; i < M; ++i)
Pablo Tello299025a2017-09-29 11:30:12 +0100156 {
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100157 for(int j = 0; j < N; ++j)
Pablo Tello299025a2017-09-29 11:30:12 +0100158 {
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100159 acc[j] = 0;
Pablo Tello299025a2017-09-29 11:30:12 +0100160 }
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100161 for(int k = 0; k < K; ++k)
162 {
163 const T_out tmp_a = a_offset + static_cast<T_out>(a[base_addr_a + k + i * K]);
164 for(int j = 0; j < N; ++j)
165 {
166 const T_out tmp_b = b_offset + static_cast<T_out>(b[base_addr_b + j + k * N]);
167 const T_out mult_as_int = tmp_a * tmp_b;
168 acc[j] += mult_as_int;
169 }
170 }
171 for(int j = 0; j < N; ++j)
172 {
173 c[base_addr_c + j + i * N] = acc[j];
174 }
Pablo Tello299025a2017-09-29 11:30:12 +0100175 }
176 }
177
178 return c;
179}
180
Pablo Tello181e6512017-11-15 13:28:27 +0000181// used to validate assembly kernels which don't know anything about offsets
Michalis Spyrouf3dfa272017-11-21 17:52:12 +0000182template <typename T1, typename T2>
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100183SimpleTensor<T1> gemmlowp(const SimpleTensor<T2> &a, const SimpleTensor<T2> &b, TensorShape shape_c)
Pablo Tello181e6512017-11-15 13:28:27 +0000184{
Georgios Pinitasebf6b8a2018-09-24 16:31:08 +0100185 return gemmlowp_matrix_multiply_core<T1, T2>(a, b, shape_c, 0, 0);
Pablo Tello181e6512017-11-15 13:28:27 +0000186}
187
Gian Marcoe75a02b2017-11-08 12:24:09 +0000188template <typename T>
Gian Marco6b77e912017-11-17 09:27:57 +0000189SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max)
Gian Marcoe75a02b2017-11-08 12:24:09 +0000190{
191 SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);
192
Gian Marco6b77e912017-11-17 09:27:57 +0000193 quantize_down_int32_to_uint8_scale<T>(&in, nullptr, &dst, result_offset, result_mult_int, result_shift, min, max);
194
195 return dst;
196}
197
198template <typename T>
199SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_offset, int32_t result_mult_int, int32_t result_shift,
200 int32_t min, int32_t max)
201{
202 SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);
203
204 quantize_down_int32_to_uint8_scale<T>(&in, &bias, &dst, result_offset, result_mult_int, result_shift, min, max);
Gian Marcoe75a02b2017-11-08 12:24:09 +0000205
206 return dst;
207}
208
Gian Marco58c57942017-11-28 09:10:03 +0000209template <typename T>
210SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, int32_t result_fixedpoint_multiplier, int32_t result_shift,
211 int32_t result_offset_after_shift, int32_t min,
212 int32_t max)
213{
214 SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);
215
216 quantize_down_int32_to_uint8_scale_by_fixedpoint<T>(&in, nullptr, &dst, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
217
218 return dst;
219}
220
221template <typename T>
222SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_fixedpoint_multiplier, int32_t result_shift,
223 int32_t result_offset_after_shift, int32_t min, int32_t max)
224{
225 SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);
226
227 quantize_down_int32_to_uint8_scale_by_fixedpoint<T>(&in, &bias, &dst, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);
228
229 return dst;
230}
231
Gian Marco Iodicebc415af2019-06-13 15:58:32 +0100232template <typename T>
233SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, int32_t result_fixedpoint_multiplier, int32_t result_shift, int32_t min,
234 int32_t max)
235{
236 SimpleTensor<int16_t> dst(in.shape(), DataType::QSYMM16);
237
238 quantize_down_int32_to_int16_scale_by_fixedpoint<T>(&in, nullptr, &dst, result_fixedpoint_multiplier, result_shift, min, max);
239
240 return dst;
241}
242
243template <typename T>
244SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_fixedpoint_multiplier, int32_t result_shift,
245 int32_t min, int32_t max)
246{
247 SimpleTensor<int16_t> dst(in.shape(), DataType::QSYMM16);
248
249 quantize_down_int32_to_int16_scale_by_fixedpoint<T>(&in, &bias, &dst, result_fixedpoint_multiplier, result_shift, min, max);
250
251 return dst;
252}
253
// Explicit template instantiations for the types exercised by the validation
// suite: int32 accumulators quantized down to uint8/int16, and the core
// GEMMLowp routine for int8/uint8 inputs producing int32 output.
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, int32_t result_fixedpoint_multiplier, int32_t result_shift,
                                                                                         int32_t result_offset_after_shift, int32_t min, int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_fixedpoint_multiplier,
                                                                                         int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
template SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, int32_t result_fixedpoint_multiplier, int32_t result_shift,
                                                                                         int32_t min, int32_t max);
template SimpleTensor<int16_t> gemmlowp_quantize_down_int32_to_int16_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_fixedpoint_multiplier,
                                                                                         int32_t result_shift, int32_t min, int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min,
                                                                           int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_offset, int32_t result_mult_int,
                                                                           int32_t result_shift, int32_t min, int32_t max);
template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset);
template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset);
template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, TensorShape shape_c);
template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, TensorShape shape_c);
Pablo Tello299025a2017-09-29 11:30:12 +0100270} // namespace reference
271} // namespace validation
272} // namespace test
273} // namespace arm_compute