/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "GEMMLowp.h"

#include "arm_compute/core/Types.h"
#include "tests/validation/reference/UtilsQuantizedAsymm.h"

#include <limits>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace reference
{
namespace
{
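// Reference integer-scale requantization: result_offset is added to the int32 accumulator
// (plus the optional per-column bias), the sum is multiplied by result_mult_int and shifted
// right by result_shift, then optionally clamped to [min, max] (bounded ReLU) and saturated to uint8.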
template <typename T>
void quantize_down_int32_to_uint8_scale(const SimpleTensor<T> *in, const SimpleTensor<T> *bias, SimpleTensor<uint8_t> *dst, int32_t result_offset, int32_t result_mult_int, int32_t result_shift,
                                        int32_t min, int32_t max)
{
    const int cols_in = in->shape().x();

    for(int i = 0; i < in->num_elements(); ++i)
    {
        int32_t result = ((*in)[i] + result_offset);

        if(bias != nullptr)
        {
            result += (*bias)[i % cols_in];
        }

        result *= result_mult_int;

        result >>= result_shift;

        // Bounded ReLu
        if(min != max)
        {
            result = std::max(min, std::min(max, result));
        }

        (*dst)[i] = static_cast<uint8_t>(std::max(0, std::min(255, result)));
    }
}

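// Reference fixed-point requantization: the int32 accumulator (plus the optional per-column bias)
// is scaled with asymm_int_mult(result, result_fixedpoint_multiplier), rounding-divided by
// 2^result_shift via asymm_rounding_divide_by_pow2, offset by result_offset_after_shift, then
// optionally clamped to [min, max] (bounded ReLU) and saturated to uint8.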
template <typename T>
void quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> *in, const SimpleTensor<T> *bias, SimpleTensor<uint8_t> *dst, int32_t result_fixedpoint_multiplier, int32_t result_shift,
                                                      int32_t result_offset_after_shift, int32_t min, int32_t max)
{
    const int cols_in = in->shape().x();

    for(int i = 0; i < in->num_elements(); ++i)
    {
        int32_t result = (*in)[i];

        if(bias != nullptr)
        {
            result += (*bias)[i % cols_in];
        }

        // Fixed point multiplication
        result = asymm_rounding_divide_by_pow2(asymm_int_mult(result, result_fixedpoint_multiplier), result_shift);
        result += result_offset_after_shift;

        // Bounded ReLu
        if(min != max)
        {
            result = std::max(min, std::min(max, result));
        }

        (*dst)[i] = static_cast<uint8_t>(std::max(0, std::min(255, result)));
    }
}
} // namespace

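// Reference GEMMLowp core: the quantization offsets are added to the raw A and B values and the
// products are accumulated in int32, i.e. C[i][j] = sum_k (A[i][k] + a_offset) * (B[k][j] + b_offset).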
template <typename T_out, typename T_in>
SimpleTensor<T_out> gemmlowp_matrix_multiply_core(const SimpleTensor<T_in> &a, const SimpleTensor<T_in> &b, int32_t a_offset, int32_t b_offset)
{
    static_assert(std::is_same<typename std::decay<T_out>::type, int32_t>::value, "Only int32_t is allowed for the output");

    TensorShape shape(b.shape()[0], a.shape()[1]);
    DataType    dt = std::is_same<T_out, int32_t>::value ? DataType::S32 : DataType::U32;
    SimpleTensor<T_out> c(shape, dt);

    const int K       = a.shape().x();
    const int b_width = b.shape().x();
    const int rows    = c.shape().y(); //M
    const int cols    = c.shape().x(); //N

    std::vector<T_out> acc;
    acc.resize(cols);

    for(int i = 0; i < rows; ++i)
    {
        for(int j = 0; j < cols; ++j)
        {
            acc[j] = 0;
        }
        for(int k = 0; k < K; ++k)
        {
            const T_out tmp_a = a_offset + static_cast<T_out>(a[k + i * K]);
            for(int j = 0; j < b_width; ++j)
            {
                const T_out tmp_b       = b_offset + static_cast<T_out>(b[j + k * b_width]);
                const T_out mult_as_int = tmp_a * tmp_b;
                acc[j] += mult_as_int;
            }
        }
        for(int j = 0; j < cols; ++j)
        {
            c[j + i * cols] = acc[j];
        }
    }

    return c;
}

// used to validate assembly kernels which don't know anything about offsets
template <typename T1, typename T2>
SimpleTensor<T1> gemmlowp(const SimpleTensor<T2> &a, const SimpleTensor<T2> &b)
{
    return gemmlowp_matrix_multiply_core<T1, T2>(a, b, 0, 0);
}

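// Entry points for the integer-scale output stage, with and without an accompanying bias tensor.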
template <typename T>
SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min, int32_t max)
{
    SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);

    quantize_down_int32_to_uint8_scale<T>(&in, nullptr, &dst, result_offset, result_mult_int, result_shift, min, max);

    return dst;
}

template <typename T>
SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_offset, int32_t result_mult_int, int32_t result_shift,
                                                                  int32_t min, int32_t max)
{
    SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);

    quantize_down_int32_to_uint8_scale<T>(&in, &bias, &dst, result_offset, result_mult_int, result_shift, min, max);

    return dst;
}

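// Entry points for the fixed-point output stage, with and without an accompanying bias tensor.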
template <typename T>
SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, int32_t result_fixedpoint_multiplier, int32_t result_shift,
                                                                                int32_t result_offset_after_shift, int32_t min,
                                                                                int32_t max)
{
    SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);

    quantize_down_int32_to_uint8_scale_by_fixedpoint<T>(&in, nullptr, &dst, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

    return dst;
}

template <typename T>
SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<T> &in, const SimpleTensor<T> &bias, int32_t result_fixedpoint_multiplier, int32_t result_shift,
                                                                                int32_t result_offset_after_shift, int32_t min, int32_t max)
{
    SimpleTensor<uint8_t> dst(in.shape(), DataType::QASYMM8);

    quantize_down_int32_to_uint8_scale_by_fixedpoint<T>(&in, &bias, &dst, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max);

    return dst;
}

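// Explicit template instantiations of the reference functions.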
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, int32_t result_fixedpoint_multiplier, int32_t result_shift,
                                                                                          int32_t result_offset_after_shift, int32_t min, int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_fixedpoint_multiplier,
                                                                                          int32_t result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, int32_t result_offset, int32_t result_mult_int, int32_t result_shift, int32_t min,
                                                                           int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_int32_to_uint8_scale(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_offset, int32_t result_mult_int,
                                                                           int32_t result_shift, int32_t min, int32_t max);
template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, int32_t a_offset, int32_t b_offset);
template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, int32_t a_offset, int32_t b_offset);
template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b);
template SimpleTensor<int32_t> gemmlowp(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b);
} // namespace reference
} // namespace validation
} // namespace test
} // namespace arm_compute