Pablo Tello | 299025a | 2017-09-29 11:30:12 +0100 | [diff] [blame] | 1 | /* |
Michele Di Giorgio | d9eaf61 | 2020-07-08 11:12:57 +0100 | [diff] [blame] | 2 | * Copyright (c) 2017-2020 Arm Limited. |
Pablo Tello | 299025a | 2017-09-29 11:30:12 +0100 | [diff] [blame] | 3 | * |
| 4 | * SPDX-License-Identifier: MIT |
| 5 | * |
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
| 7 | * of this software and associated documentation files (the "Software"), to |
| 8 | * deal in the Software without restriction, including without limitation the |
| 9 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| 10 | * sell copies of the Software, and to permit persons to whom the Software is |
| 11 | * furnished to do so, subject to the following conditions: |
| 12 | * |
| 13 | * The above copyright notice and this permission notice shall be included in all |
| 14 | * copies or substantial portions of the Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
| 19 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
| 21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| 22 | * SOFTWARE. |
| 23 | */ |
Gian Marco | e75a02b | 2017-11-08 12:24:09 +0000 | [diff] [blame] | 24 | #include "GEMMLowp.h" |
Pablo Tello | 299025a | 2017-09-29 11:30:12 +0100 | [diff] [blame] | 25 | |
| 26 | #include "arm_compute/core/Types.h" |
Georgios Pinitas | 5a7e776 | 2017-12-01 16:27:29 +0000 | [diff] [blame] | 27 | #include "tests/validation/reference/UtilsQuantizedAsymm.h" |
Gian Marco | 58c5794 | 2017-11-28 09:10:03 +0000 | [diff] [blame] | 28 | |
Georgios Pinitas | afc630f | 2020-03-30 14:09:27 +0100 | [diff] [blame] | 29 | #include "support/ToolchainSupport.h" |
| 30 | |
Gian Marco | 58c5794 | 2017-11-28 09:10:03 +0000 | [diff] [blame] | 31 | #include <limits> |
Pablo Tello | 299025a | 2017-09-29 11:30:12 +0100 | [diff] [blame] | 32 | |
| 33 | namespace arm_compute |
| 34 | { |
| 35 | namespace test |
| 36 | { |
| 37 | namespace validation |
| 38 | { |
| 39 | namespace reference |
| 40 | { |
Gian Marco | 6b77e91 | 2017-11-17 09:27:57 +0000 | [diff] [blame] | 41 | namespace |
| 42 | { |
| 43 | template <typename T> |
Georgios Pinitas | 448a81f | 2019-11-21 14:10:25 +0000 | [diff] [blame] | 44 | struct DataTypeExtractor |
| 45 | { |
| 46 | static DataType data_type() |
| 47 | { |
| 48 | DataType data_type = DataType::UNKNOWN; |
| 49 | if(std::is_same<T, int8_t>::value) |
| 50 | { |
| 51 | data_type = DataType::QASYMM8_SIGNED; |
| 52 | } |
| 53 | else if(std::is_same<T, uint8_t>::value) |
| 54 | { |
| 55 | data_type = DataType::QASYMM8; |
| 56 | } |
| 57 | else if(std::is_same<T, int16_t>::value) |
| 58 | { |
| 59 | data_type = DataType::QSYMM16; |
| 60 | } |
| 61 | return data_type; |
| 62 | } |
| 63 | }; |
| 64 | |
Manuel Bottini | 959c26d | 2019-12-02 16:22:35 +0000 | [diff] [blame] | 65 | template <typename TIn, typename TOut> |
| 66 | void quantize_down_scale(const SimpleTensor<TIn> *in, const SimpleTensor<TIn> *bias, SimpleTensor<TOut> *dst, int32_t result_offset, std::vector<int32_t> result_mult_int, |
| 67 | std::vector<int32_t> result_shift, int32_t min, int32_t max) |
Gian Marco | 6b77e91 | 2017-11-17 09:27:57 +0000 | [diff] [blame] | 68 | { |
Vidhya Sudhan Loganathan | 951b8a4 | 2019-11-04 14:42:08 +0000 | [diff] [blame] | 69 | const int cols_in = in->shape().x(); |
| 70 | const bool is_per_channel = result_mult_int.size() > 1; |
Gian Marco | 6b77e91 | 2017-11-17 09:27:57 +0000 | [diff] [blame] | 71 | |
Michalis Spyrou | d1d7722 | 2020-04-08 14:10:15 +0100 | [diff] [blame] | 72 | #if defined(_OPENMP) |
| 73 | #pragma omp parallel for |
| 74 | #endif /* _OPENMP */ |
Gian Marco | 6b77e91 | 2017-11-17 09:27:57 +0000 | [diff] [blame] | 75 | for(int i = 0; i < in->num_elements(); ++i) |
| 76 | { |
Gian Marco | 58c5794 | 2017-11-28 09:10:03 +0000 | [diff] [blame] | 77 | int32_t result = ((*in)[i] + result_offset); |
Gian Marco | 6b77e91 | 2017-11-17 09:27:57 +0000 | [diff] [blame] | 78 | |
| 79 | if(bias != nullptr) |
| 80 | { |
| 81 | result += (*bias)[i % cols_in]; |
| 82 | } |
| 83 | |
Vidhya Sudhan Loganathan | 951b8a4 | 2019-11-04 14:42:08 +0000 | [diff] [blame] | 84 | result *= (is_per_channel) ? result_mult_int[i % cols_in] : result_mult_int[0]; |
Gian Marco | 58c5794 | 2017-11-28 09:10:03 +0000 | [diff] [blame] | 85 | |
Vidhya Sudhan Loganathan | 951b8a4 | 2019-11-04 14:42:08 +0000 | [diff] [blame] | 86 | result >>= (is_per_channel) ? result_shift[i % cols_in] : result_shift[0]; |
Gian Marco | 6b77e91 | 2017-11-17 09:27:57 +0000 | [diff] [blame] | 87 | |
| 88 | // Bounded ReLu |
| 89 | if(min != max) |
| 90 | { |
| 91 | result = std::max(min, std::min(max, result)); |
| 92 | } |
| 93 | |
Manuel Bottini | 959c26d | 2019-12-02 16:22:35 +0000 | [diff] [blame] | 94 | (*dst)[i] = static_cast<TOut>(std::max<TIn>(std::numeric_limits<TOut>::lowest(), |
| 95 | std::min<TIn>(std::numeric_limits<TOut>::max(), result))); |
Gian Marco | 6b77e91 | 2017-11-17 09:27:57 +0000 | [diff] [blame] | 96 | } |
| 97 | } |
Gian Marco | 58c5794 | 2017-11-28 09:10:03 +0000 | [diff] [blame] | 98 | |
/** Reference requantization of GEMMLowp accumulators via fixed-point multiplication
 *  (gemmlowp-style saturating rounding doubling high-mul followed by a rounding
 *  divide by a power of two), then an offset add, optional bounded ReLu and a
 *  final saturation to the output type.
 *
 * @param in                          Input accumulator tensor.
 * @param bias                        Optional bias (nullptr to skip); indexed modulo the row width.
 * @param dst                         Destination tensor, same number of elements as @p in.
 * @param result_fixedpoint_multiplier Fixed-point multiplier(s); more than one entry selects per-channel mode.
 * @param result_shift                Shift(s); negative values mean a left shift applied *before* the multiply.
 * @param result_offset_after_shift   Offset added after the fixed-point stage.
 * @param min, max                    Bounded-ReLu clamp; min == max disables the activation.
 */
template <typename TIn, typename TOut>
void quantize_down_scale_by_fixedpoint(const SimpleTensor<TIn> *in, const SimpleTensor<TIn> *bias, SimpleTensor<TOut> *dst, std::vector<int32_t> result_fixedpoint_multiplier,
                                       std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max)
{
    const int  cols_in        = in->shape().x();
    // Per-channel mode is implied by more than one multiplier being supplied
    const bool is_per_channel = result_fixedpoint_multiplier.size() > 1;

#if defined(_OPENMP)
    #pragma omp parallel for
#endif /* _OPENMP */
    for(int i = 0; i < in->num_elements(); ++i)
    {
        TIn result = (*in)[i];

        if(bias != nullptr)
        {
            // Bias is broadcast along rows: one value per column
            result += (*bias)[i % cols_in];
        }

        // Fixed point multiplication
        const int32_t multiplier = (is_per_channel) ? result_fixedpoint_multiplier[i % cols_in] : result_fixedpoint_multiplier[0];
        const int32_t shift      = (is_per_channel) ? result_shift[i % cols_in] : result_shift[0];

        if(shift < 0)
        {
            // Negative shift: scale up by 2^(-shift) BEFORE the high-mul
            result = asymm_int_mult(result * (1 << (-shift)), multiplier);
        }
        else
        {
            // Non-negative shift: high-mul first, then rounding divide by 2^shift
            result = asymm_rounding_divide_by_pow2(asymm_int_mult(result, multiplier), shift);
        }
        result += result_offset_after_shift;

        // Bounded ReLu (min == max means "no activation")
        if(min != max)
        {
            result = std::max(min, std::min(max, result));
        }

        // Saturate to the representable range of the output type
        (*dst)[i] = static_cast<TOut>(std::max<TIn>(std::numeric_limits<TOut>::lowest(),
                                                    std::min<TIn>(std::numeric_limits<TOut>::max(), result)));
    }
}
Sheri Zhang | 1b14c75 | 2020-03-09 14:29:52 +0000 | [diff] [blame] | 142 | |
| 143 | template <typename TIn, typename TOut> |
| 144 | void quantize_down_scale_by_float(const SimpleTensor<TIn> *in, const SimpleTensor<TIn> *bias, SimpleTensor<TOut> *dst, std::vector<float_t> result_real_multiplier, |
| 145 | int32_t result_offset, int32_t min, int32_t max) |
| 146 | { |
| 147 | const int cols_in = in->shape().x(); |
| 148 | const bool is_per_channel = result_real_multiplier.size() > 1; |
| 149 | |
Michalis Spyrou | d1d7722 | 2020-04-08 14:10:15 +0100 | [diff] [blame] | 150 | #if defined(_OPENMP) |
| 151 | #pragma omp parallel for |
| 152 | #endif /* _OPENMP */ |
Sheri Zhang | 1b14c75 | 2020-03-09 14:29:52 +0000 | [diff] [blame] | 153 | for(int i = 0; i < in->num_elements(); ++i) |
| 154 | { |
| 155 | TIn result = (*in)[i]; |
| 156 | |
| 157 | if(bias != nullptr) |
| 158 | { |
| 159 | result += (*bias)[i % cols_in]; |
| 160 | } |
| 161 | |
| 162 | // Float multiplication |
| 163 | const float_t multiplier = (is_per_channel) ? result_real_multiplier[i % cols_in] : result_real_multiplier[0]; |
| 164 | |
| 165 | float_t result_f = static_cast<float_t>(result) * multiplier + static_cast<float_t>(result_offset); |
Georgios Pinitas | afc630f | 2020-03-30 14:09:27 +0100 | [diff] [blame] | 166 | result = static_cast<TIn>(support::cpp11::round(result_f)); |
Sheri Zhang | 1b14c75 | 2020-03-09 14:29:52 +0000 | [diff] [blame] | 167 | |
| 168 | // Bounded ReLu |
| 169 | if(min != max) |
| 170 | { |
| 171 | result = std::max(min, std::min(max, result)); |
| 172 | } |
| 173 | |
| 174 | (*dst)[i] = static_cast<TOut>(std::max<TIn>(std::numeric_limits<TOut>::lowest(), |
| 175 | std::min<TIn>(std::numeric_limits<TOut>::max(), result))); |
| 176 | } |
| 177 | } |
Gian Marco | 6b77e91 | 2017-11-17 09:27:57 +0000 | [diff] [blame] | 178 | } // namespace |
| 179 | |
Vidhya Sudhan Loganathan | 951b8a4 | 2019-11-04 14:42:08 +0000 | [diff] [blame] | 180 | template <typename T_out, typename T_in, typename T_in_1> |
| 181 | SimpleTensor<T_out> gemmlowp_matrix_multiply_core(const SimpleTensor<T_in> &a, const SimpleTensor<T_in_1> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset) |
Pablo Tello | 299025a | 2017-09-29 11:30:12 +0100 | [diff] [blame] | 182 | { |
Michalis Spyrou | f3dfa27 | 2017-11-21 17:52:12 +0000 | [diff] [blame] | 183 | static_assert(std::is_same<typename std::decay<T_out>::type, int32_t>::value, "Only int32_t is allowed for the output"); |
Gian Marco | e75a02b | 2017-11-08 12:24:09 +0000 | [diff] [blame] | 184 | |
Michalis Spyrou | f3dfa27 | 2017-11-21 17:52:12 +0000 | [diff] [blame] | 185 | DataType dt = std::is_same<T_out, int32_t>::value ? DataType::S32 : DataType::U32; |
Georgios Pinitas | ebf6b8a | 2018-09-24 16:31:08 +0100 | [diff] [blame] | 186 | SimpleTensor<T_out> c(shape_c, dt); |
Gian Marco | e75a02b | 2017-11-08 12:24:09 +0000 | [diff] [blame] | 187 | |
Georgios Pinitas | ebf6b8a | 2018-09-24 16:31:08 +0100 | [diff] [blame] | 188 | const int K = a.shape().x(); |
| 189 | const int M = a.shape().y(); |
| 190 | const int N = b.shape().x(); |
| 191 | const int D = a.shape().z(); // Number of matrices in a batch |
| 192 | |
| 193 | const int a_stride_z = K * M; |
| 194 | // Do not slide the matrix B along the 3rd dimension in case matrix B has less than 3 dimensions |
| 195 | const int b_stride_z = b.shape().num_dimensions() > 2 ? N * K : 0; |
| 196 | const int c_stride_z = N * M; |
Gian Marco | e75a02b | 2017-11-08 12:24:09 +0000 | [diff] [blame] | 197 | |
Michalis Spyrou | f3dfa27 | 2017-11-21 17:52:12 +0000 | [diff] [blame] | 198 | std::vector<T_out> acc; |
Georgios Pinitas | ebf6b8a | 2018-09-24 16:31:08 +0100 | [diff] [blame] | 199 | acc.resize(N); |
Gian Marco | e75a02b | 2017-11-08 12:24:09 +0000 | [diff] [blame] | 200 | |
Georgios Pinitas | ebf6b8a | 2018-09-24 16:31:08 +0100 | [diff] [blame] | 201 | for(int depth = 0; depth < D; ++depth) |
Pablo Tello | 299025a | 2017-09-29 11:30:12 +0100 | [diff] [blame] | 202 | { |
Georgios Pinitas | ebf6b8a | 2018-09-24 16:31:08 +0100 | [diff] [blame] | 203 | const int base_addr_a = depth * a_stride_z; |
| 204 | const int base_addr_b = depth * b_stride_z; |
| 205 | const int base_addr_c = depth * c_stride_z; |
| 206 | |
| 207 | for(int i = 0; i < M; ++i) |
Pablo Tello | 299025a | 2017-09-29 11:30:12 +0100 | [diff] [blame] | 208 | { |
Georgios Pinitas | ebf6b8a | 2018-09-24 16:31:08 +0100 | [diff] [blame] | 209 | for(int j = 0; j < N; ++j) |
Pablo Tello | 299025a | 2017-09-29 11:30:12 +0100 | [diff] [blame] | 210 | { |
Georgios Pinitas | ebf6b8a | 2018-09-24 16:31:08 +0100 | [diff] [blame] | 211 | acc[j] = 0; |
Pablo Tello | 299025a | 2017-09-29 11:30:12 +0100 | [diff] [blame] | 212 | } |
Georgios Pinitas | ebf6b8a | 2018-09-24 16:31:08 +0100 | [diff] [blame] | 213 | for(int k = 0; k < K; ++k) |
| 214 | { |
| 215 | const T_out tmp_a = a_offset + static_cast<T_out>(a[base_addr_a + k + i * K]); |
| 216 | for(int j = 0; j < N; ++j) |
| 217 | { |
| 218 | const T_out tmp_b = b_offset + static_cast<T_out>(b[base_addr_b + j + k * N]); |
| 219 | const T_out mult_as_int = tmp_a * tmp_b; |
| 220 | acc[j] += mult_as_int; |
| 221 | } |
| 222 | } |
| 223 | for(int j = 0; j < N; ++j) |
| 224 | { |
| 225 | c[base_addr_c + j + i * N] = acc[j]; |
| 226 | } |
Pablo Tello | 299025a | 2017-09-29 11:30:12 +0100 | [diff] [blame] | 227 | } |
| 228 | } |
| 229 | |
| 230 | return c; |
| 231 | } |
| 232 | |
Pablo Tello | 181e651 | 2017-11-15 13:28:27 +0000 | [diff] [blame] | 233 | // used to validate assembly kernels which don't know anything about offsets |
Vidhya Sudhan Loganathan | 951b8a4 | 2019-11-04 14:42:08 +0000 | [diff] [blame] | 234 | template <typename T1, typename T2, typename T3> |
| 235 | SimpleTensor<T1> gemmlowp(const SimpleTensor<T2> &a, const SimpleTensor<T3> &b, TensorShape shape_c) |
Pablo Tello | 181e651 | 2017-11-15 13:28:27 +0000 | [diff] [blame] | 236 | { |
Vidhya Sudhan Loganathan | 951b8a4 | 2019-11-04 14:42:08 +0000 | [diff] [blame] | 237 | return gemmlowp_matrix_multiply_core<T1, T2, T3>(a, b, shape_c, 0, 0); |
Pablo Tello | 181e651 | 2017-11-15 13:28:27 +0000 | [diff] [blame] | 238 | } |
| 239 | |
Manuel Bottini | 959c26d | 2019-12-02 16:22:35 +0000 | [diff] [blame] | 240 | template <typename TIn, typename TOut> |
| 241 | SimpleTensor<TOut> gemmlowp_quantize_down_scale(const SimpleTensor<TIn> &in, int32_t result_offset, std::vector<int32_t> result_mult_int, std::vector<int32_t> result_shift, |
| 242 | int32_t min, int32_t max) |
Gian Marco | e75a02b | 2017-11-08 12:24:09 +0000 | [diff] [blame] | 243 | { |
Manuel Bottini | 959c26d | 2019-12-02 16:22:35 +0000 | [diff] [blame] | 244 | SimpleTensor<TOut> dst(in.shape(), DataTypeExtractor<TOut>::data_type()); |
Gian Marco | e75a02b | 2017-11-08 12:24:09 +0000 | [diff] [blame] | 245 | |
Manuel Bottini | 959c26d | 2019-12-02 16:22:35 +0000 | [diff] [blame] | 246 | quantize_down_scale<TIn, TOut>(&in, nullptr, &dst, result_offset, result_mult_int, result_shift, min, max); |
Gian Marco | 6b77e91 | 2017-11-17 09:27:57 +0000 | [diff] [blame] | 247 | |
| 248 | return dst; |
| 249 | } |
| 250 | |
Manuel Bottini | 959c26d | 2019-12-02 16:22:35 +0000 | [diff] [blame] | 251 | template <typename TIn, typename TOut> |
| 252 | SimpleTensor<TOut> gemmlowp_quantize_down_scale(const SimpleTensor<TIn> &in, const SimpleTensor<TIn> &bias, int32_t result_offset, std::vector<int32_t> result_mult_int, |
| 253 | std::vector<int32_t> result_shift, int32_t min, int32_t max) |
Gian Marco | 6b77e91 | 2017-11-17 09:27:57 +0000 | [diff] [blame] | 254 | { |
Manuel Bottini | 959c26d | 2019-12-02 16:22:35 +0000 | [diff] [blame] | 255 | SimpleTensor<TOut> dst(in.shape(), DataTypeExtractor<TOut>::data_type()); |
Gian Marco | 6b77e91 | 2017-11-17 09:27:57 +0000 | [diff] [blame] | 256 | |
Manuel Bottini | 959c26d | 2019-12-02 16:22:35 +0000 | [diff] [blame] | 257 | quantize_down_scale<TIn, TOut>(&in, &bias, &dst, result_offset, result_mult_int, result_shift, min, max); |
Gian Marco | e75a02b | 2017-11-08 12:24:09 +0000 | [diff] [blame] | 258 | |
| 259 | return dst; |
| 260 | } |
| 261 | |
Georgios Pinitas | 448a81f | 2019-11-21 14:10:25 +0000 | [diff] [blame] | 262 | template <typename TIn, typename TOut> |
| 263 | SimpleTensor<TOut> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<TIn> &in, std::vector<int32_t> result_fixedpoint_multiplier, std::vector<int32_t> result_shift, |
| 264 | int32_t result_offset_after_shift, int32_t min, int32_t max) |
Gian Marco | 58c5794 | 2017-11-28 09:10:03 +0000 | [diff] [blame] | 265 | { |
Georgios Pinitas | 448a81f | 2019-11-21 14:10:25 +0000 | [diff] [blame] | 266 | SimpleTensor<TOut> dst(in.shape(), DataTypeExtractor<TOut>::data_type()); |
Gian Marco | 58c5794 | 2017-11-28 09:10:03 +0000 | [diff] [blame] | 267 | |
Georgios Pinitas | 448a81f | 2019-11-21 14:10:25 +0000 | [diff] [blame] | 268 | quantize_down_scale_by_fixedpoint<TIn, TOut>(&in, nullptr, &dst, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max); |
Gian Marco | 58c5794 | 2017-11-28 09:10:03 +0000 | [diff] [blame] | 269 | |
| 270 | return dst; |
| 271 | } |
| 272 | |
Georgios Pinitas | 448a81f | 2019-11-21 14:10:25 +0000 | [diff] [blame] | 273 | template <typename TIn, typename TOut> |
| 274 | SimpleTensor<TOut> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<TIn> &in, const SimpleTensor<TIn> &bias, std::vector<int32_t> result_fixedpoint_multiplier, |
| 275 | std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max) |
Gian Marco | 58c5794 | 2017-11-28 09:10:03 +0000 | [diff] [blame] | 276 | { |
Georgios Pinitas | 448a81f | 2019-11-21 14:10:25 +0000 | [diff] [blame] | 277 | SimpleTensor<TOut> dst(in.shape(), DataTypeExtractor<TOut>::data_type()); |
Gian Marco | 58c5794 | 2017-11-28 09:10:03 +0000 | [diff] [blame] | 278 | |
Georgios Pinitas | 448a81f | 2019-11-21 14:10:25 +0000 | [diff] [blame] | 279 | quantize_down_scale_by_fixedpoint<TIn, TOut>(&in, &bias, &dst, result_fixedpoint_multiplier, result_shift, result_offset_after_shift, min, max); |
Gian Marco | 58c5794 | 2017-11-28 09:10:03 +0000 | [diff] [blame] | 280 | |
| 281 | return dst; |
| 282 | } |
| 283 | |
Sheri Zhang | 1b14c75 | 2020-03-09 14:29:52 +0000 | [diff] [blame] | 284 | template <typename TIn, typename TOut> |
| 285 | SimpleTensor<TOut> gemmlowp_quantize_down_scale_by_float(const SimpleTensor<TIn> &in, const SimpleTensor<TIn> &bias, |
| 286 | std::vector<float_t> result_real_multiplier, int32_t result_offset, int32_t min, int32_t max) |
| 287 | { |
| 288 | SimpleTensor<TOut> dst(in.shape(), DataTypeExtractor<TOut>::data_type()); |
| 289 | |
| 290 | quantize_down_scale_by_float<TIn, TOut>(&in, &bias, &dst, result_real_multiplier, result_offset, min, max); |
| 291 | |
| 292 | return dst; |
| 293 | } |
| 294 | |
| 295 | template <typename TIn, typename TOut> |
| 296 | SimpleTensor<TOut> gemmlowp_quantize_down_scale_by_float(const SimpleTensor<TIn> &in, |
| 297 | std::vector<float_t> result_real_multiplier, int32_t result_offset, int32_t min, int32_t max) |
| 298 | { |
| 299 | SimpleTensor<TOut> dst(in.shape(), DataTypeExtractor<TOut>::data_type()); |
| 300 | |
| 301 | quantize_down_scale_by_float<TIn, TOut>(&in, nullptr, &dst, result_real_multiplier, result_offset, min, max); |
| 302 | |
| 303 | return dst; |
| 304 | } |
| 305 | |
// Explicit template instantiations for the data-type combinations exercised by
// the validation suite (input accumulators are always int32_t).

// quantize-down by float multiplier: uint8_t / int8_t outputs, with and without bias
template SimpleTensor<uint8_t> gemmlowp_quantize_down_scale_by_float(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b,
                                                                     std::vector<float_t> result_real_multiplier, int32_t result_offset, int32_t min, int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_scale_by_float(const SimpleTensor<int32_t> &a,
                                                                     std::vector<float_t> result_real_multiplier, int32_t result_offset, int32_t min, int32_t max);
template SimpleTensor<int8_t> gemmlowp_quantize_down_scale_by_float(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b,
                                                                    std::vector<float_t> result_real_multiplier, int32_t result_offset, int32_t min, int32_t max);
template SimpleTensor<int8_t> gemmlowp_quantize_down_scale_by_float(const SimpleTensor<int32_t> &a,
                                                                    std::vector<float_t> result_real_multiplier, int32_t result_offset, int32_t min, int32_t max);
// quantize-down by fixed-point multiplier: uint8_t / int8_t / int16_t outputs, with and without bias
template SimpleTensor<uint8_t> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, std::vector<int32_t> result_fixedpoint_multiplier,
                                                                          std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b,
                                                                          std::vector<int32_t> result_fixedpoint_multiplier,
                                                                          std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
template SimpleTensor<int8_t> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, std::vector<int32_t> result_fixedpoint_multiplier,
                                                                         std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
template SimpleTensor<int8_t> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b,
                                                                         std::vector<int32_t> result_fixedpoint_multiplier,
                                                                         std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
template SimpleTensor<int16_t> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, std::vector<int32_t> result_fixedpoint_multiplier,
                                                                          std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
template SimpleTensor<int16_t> gemmlowp_quantize_down_scale_by_fixedpoint(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b,
                                                                          std::vector<int32_t> result_fixedpoint_multiplier,
                                                                          std::vector<int32_t> result_shift, int32_t result_offset_after_shift, int32_t min, int32_t max);
// quantize-down by integer multiplier/shift: uint8_t / int8_t outputs, with and without bias
template SimpleTensor<uint8_t> gemmlowp_quantize_down_scale(const SimpleTensor<int32_t> &a, int32_t result_offset, std::vector<int32_t> result_mult_int,
                                                            std::vector<int32_t> result_shift, int32_t min, int32_t max);
template SimpleTensor<uint8_t> gemmlowp_quantize_down_scale(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_offset, std::vector<int32_t> result_mult_int,
                                                            std::vector<int32_t> result_shift, int32_t min, int32_t max);
template SimpleTensor<int8_t> gemmlowp_quantize_down_scale(const SimpleTensor<int32_t> &a, int32_t result_offset, std::vector<int32_t> result_mult_int,
                                                           std::vector<int32_t> result_shift, int32_t min, int32_t max);
template SimpleTensor<int8_t> gemmlowp_quantize_down_scale(const SimpleTensor<int32_t> &a, const SimpleTensor<int32_t> &b, int32_t result_offset, std::vector<int32_t> result_mult_int,
                                                           std::vector<int32_t> result_shift, int32_t min, int32_t max);
// reference core GEMM and the offset-free wrapper used by assembly-kernel tests
template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset);
template SimpleTensor<int32_t> gemmlowp_matrix_multiply_core(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, TensorShape shape_c, int32_t a_offset, int32_t b_offset);
template SimpleTensor<int32_t> gemmlowp<int32_t, int8_t, int8_t>(const SimpleTensor<int8_t> &a, const SimpleTensor<int8_t> &b, TensorShape shape_c);
template SimpleTensor<int32_t> gemmlowp<int32_t, uint8_t, uint8_t>(const SimpleTensor<uint8_t> &a, const SimpleTensor<uint8_t> &b, TensorShape shape_c);
template SimpleTensor<int32_t> gemmlowp<int32_t, uint8_t, int8_t>(const SimpleTensor<uint8_t> &a, const SimpleTensor<int8_t> &b, TensorShape shape_c);
Pablo Tello | 299025a | 2017-09-29 11:30:12 +0100 | [diff] [blame] | 342 | } // namespace reference |
| 343 | } // namespace validation |
| 344 | } // namespace test |
| 345 | } // namespace arm_compute |