blob: 06cb759b165f85c06e67b6fc72c4d57069bca37b [file] [log] [blame]
/*
 * Copyright (c) 2017-2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLGEMMLOWPOUTPUTSTAGE_H
#define ARM_COMPUTE_CLGEMMLOWPOUTPUTSTAGE_H

#include "arm_compute/runtime/CL/ICLSimpleFunction.h"

/** This file contains all available output stages for GEMMLowp on OpenCL.
 *
 * In gemmlowp, the "output stage" is the process that takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyCore),
 * and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
 *
 * More information about the GEMMLowp output stage can be found at https://github.com/google/gemmlowp/blob/master/doc/output.md
 */

namespace arm_compute
{
// Forward declaration
class ITensor;

/** Basic function to execute CLGEMMLowpQuantizeDownInt32ToUint8Scale on OpenCL.
 *
 * CLGEMMLowpQuantizeDownInt32ToUint8Scale depends on 3 parameters: result_offset, result_mult_int, result_shift
 * The final result is:
 *
 * ((input[i][k] + result_offset) * result_mult_int) >> result_shift
 *
 * In case the bias tensor is provided, the final result is:
 *
 * ((input[i][k] + bias[k] + result_offset) * result_mult_int) >> result_shift
 *
 * This function calls the following OpenCL kernels:
 *
 * -# @ref CLGEMMLowpQuantizeDownInt32ScaleKernel
 *
 * @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
 *       after the result is shifted right by result_shift
*/
class CLGEMMLowpQuantizeDownInt32ToUint8Scale : public ICLSimpleFunction
{
public:
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  input           Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
     * @param[in]  bias            Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                             Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output          Output tensor. Data type supported: QASYMM8
     * @param[in]  result_offset   Offset to be added to each element of the input matrix
     * @param[in]  result_mult_int Value to be multiplied to each element of the input matrix when once the result_offset has been add
     * @param[in]  result_shift    Number of bits to shift right the result before converting back to QASYMM8
     * @param[in]  min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
     *                             Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    ARM_COMPUTE_DEPRECATED_REL(20.05)
    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_offset, int result_mult_int, int result_shift, int min = std::numeric_limits<int32_t>::lowest(),
                   int max = std::numeric_limits<int32_t>::max());
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
     * @param[in]  bias            Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                             Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output          Output tensor. Data type supported: QASYMM8
     * @param[in]  result_offset   Offset to be added to each element of the input matrix
     * @param[in]  result_mult_int Value to be multiplied to each element of the input matrix when once the result_offset has been add
     * @param[in]  result_shift    Number of bits to shift right the result before converting back to QASYMM8
     * @param[in]  min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
     *                             Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    ARM_COMPUTE_DEPRECATED_REL(20.05)
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_offset, int result_mult_int, int result_shift,
                   int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8Scale
     *
     * @param[in] input  Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
     * @param[in] bias   Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output Output tensor. Data type supported: QASYMM8
     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QASYMM8. Defaults to the minimum possible 32-bit signed integer.
     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
     *                   Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     *
     * @return a status
     */
    ARM_COMPUTE_DEPRECATED_REL(20.05)
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
};
110
/** Basic function to execute CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint on OpenCL.
 *
 * CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint depends on 3 parameters:
 *
 * result_fixedpoint_multiplier, result_shift, result_offset_after_shift
 *
 * The final result is:
 *
 * (FixedPointMul(input[i][k], result_fixedpoint_multiplier) >> result_shift) + result_offset_after_shift
 *
 * where FixedPointMul(x, y) is the nearest integer to the following
 * mathematical expression, evaluated without overflow or intermediate rounding:
 *
 * (x * y) / 2^31
 *
 * For more information: https://github.com/google/gemmlowp/blob/master/public/output_stages.h#L68
 *
 * In case the bias tensor is provided, the final result is:
 *
 * ((FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier)) >> result_shift) + result_offset_after_shift
 *
 * This function calls the following OpenCL kernels:
 *
 * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
 *
 * @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
 *       after the result is shifted right by result_shift
*/
class CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint : public ICLSimpleFunction
{
public:
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  input                        Input tensor. Data type supported: S32
     * @param[in]  bias                         Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                                          Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output                       Output tensor. Data type supported: QASYMM8
     * @param[in]  result_fixedpoint_multiplier Fixed point value to be multiplied to each element of the input matrix when once the result_offset has been add
     * @param[in]  result_shift                 Number of bits to shift right the result after the fixed point multiplication
     * @param[in]  result_offset_after_shift    Offset to be applied to result before converting it back to QASYMM8
     * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QASYMM8. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
     *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
                   int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  compile_context              The compile context to be used.
     * @param[in]  input                        Input tensor. Data type supported: S32
     * @param[in]  bias                         Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                                          Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output                       Output tensor. Data type supported: QASYMM8
     * @param[in]  result_fixedpoint_multiplier Fixed point value to be multiplied to each element of the input matrix when once the result_offset has been add
     * @param[in]  result_shift                 Number of bits to shift right the result after the fixed point multiplication
     * @param[in]  result_offset_after_shift    Offset to be applied to result before converting it back to QASYMM8
     * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QASYMM8. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
     *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift,
                   int result_offset_after_shift,
                   int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint
     *
     * @param[in] input  Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
     * @param[in] bias   Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output Output tensor. Data type supported: QASYMM8
     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QASYMM8. Defaults to the minimum possible 32-bit signed integer.
     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
     *                   Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
};
Georgios Pinitas51e53a32018-10-22 13:49:08 +0100188
/** Basic function to execute CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint on OpenCL.
 *
 * CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint depends on 3 parameters:
 *
 * result_fixedpoint_multiplier, result_shift, result_offset_after_shift
 *
 * The final result is:
 *
 * (FixedPointMul(input[i][k], result_fixedpoint_multiplier) >> result_shift) + result_offset_after_shift
 *
 * where FixedPointMul(x, y) is the nearest integer to the following
 * mathematical expression, evaluated without overflow or intermediate rounding:
 *
 * (x * y) / 2^31
 *
 * For more information: https://github.com/google/gemmlowp/blob/master/public/output_stages.h#L68
 *
 * In case the bias tensor is provided, the final result is:
 *
 * ((FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier)) >> result_shift) + result_offset_after_shift
 *
 * This function calls the following OpenCL kernels:
 *
 * -# @ref CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel
 *
 * @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
 *       after the result is shifted right by result_shift
*/
class CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint : public ICLSimpleFunction
{
public:
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  input                        Input tensor. Data type supported: S32
     * @param[in]  bias                         Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                                          Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output                       Output tensor. Data type supported: QASYMM8_SIGNED
     * @param[in]  result_fixedpoint_multiplier Fixed point value to be multiplied to each element of the input matrix when once the result_offset has been add
     * @param[in]  result_shift                 Number of bits to shift right the result after the fixed point multiplication
     * @param[in]  result_offset_after_shift    Offset to be applied to result before converting it back to QASYMM8_SIGNED
     * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QASYMM8_SIGNED. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QASYMM8_SIGNED.
     *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
                   int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  compile_context              The compile context to be used.
     * @param[in]  input                        Input tensor. Data type supported: S32
     * @param[in]  bias                         Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                                          Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output                       Output tensor. Data type supported: QASYMM8_SIGNED
     * @param[in]  result_fixedpoint_multiplier Fixed point value to be multiplied to each element of the input matrix when once the result_offset has been add
     * @param[in]  result_shift                 Number of bits to shift right the result after the fixed point multiplication
     * @param[in]  result_offset_after_shift    Offset to be applied to result before converting it back to QASYMM8_SIGNED
     * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QASYMM8_SIGNED. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QASYMM8_SIGNED.
     *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift,
                   int result_offset_after_shift,
                   int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint
     *
     * @param[in] input  Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
     * @param[in] bias   Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output Output tensor. Data type supported: QASYMM8_SIGNED
     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QASYMM8_SIGNED. Defaults to the minimum possible 32-bit signed integer.
     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QASYMM8_SIGNED.
     *                   Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
};
266
/** Basic function to execute CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat on OpenCL.
 *
 * This function calls the following OpenCL kernels:
 *
 * -# @ref CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel
 *
 * @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
 *       after the result is shifted right by result_shift
*/
class CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat : public ICLSimpleFunction
{
public:
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  input      Input tensor. Data type supported: S32
     * @param[in]  bias       Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                        Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output     Output tensor. Data type supported: QASYMM8
     * @param[in]  multiplier Float multiplier to be multiplied to each element of the input matrix
     * @param[in]  offset     Offset to be applied to result before converting it back to QASYMM8
     * @param[in]  min        (Optional) Min value used to saturate down the output result before converting back to QASYMM8. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max        (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
     *                        Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    ARM_COMPUTE_DEPRECATED_REL(20.05)
    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, float multiplier, int offset, int min = std::numeric_limits<int32_t>::lowest(),
                   int max = std::numeric_limits<int32_t>::max());
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           Input tensor. Data type supported: S32
     * @param[in]  bias            Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                             Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output          Output tensor. Data type supported: QASYMM8
     * @param[in]  multiplier      Float multiplier to be multiplied to each element of the input matrix
     * @param[in]  offset          Offset to be applied to result before converting it back to QASYMM8
     * @param[in]  min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
     *                             Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    ARM_COMPUTE_DEPRECATED_REL(20.05)
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, float multiplier, int offset,
                   int min = std::numeric_limits<int32_t>::lowest(),
                   int max = std::numeric_limits<int32_t>::max());
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloat
     *
     * @param[in] input  Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
     * @param[in] bias   Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output Output tensor. Data type supported: QASYMM8
     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QASYMM8. Defaults to the minimum possible 32-bit signed integer.
     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QASYMM8,
     *                   Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     *
     * @return a status
     */
    ARM_COMPUTE_DEPRECATED_REL(20.05)
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
};
Manuel Bottini9c9b70b2019-07-01 17:35:56 +0100326/** Basic function to execute CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint on OpenCL.
327 *
328 * CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint depends on 2 parameters:
329 *
330 * result_fixedpoint_multiplier, result_shift
331 *
332 * The final result is:
333 *
334 * (FixedPointMul(input[i][k], result_fixedpoint_multiplier) >> result_shift)
335 *
336 * where FixedPointMul(x, y) is the nearest integer to the following
337 * mathematical expression, evaluated without overflow or intermediate rounding:
338 *
339 * (x * y) / 2^31
340 *
341 * For more information: https://github.com/google/gemmlowp/blob/master/public/output_stages.h#L68
342 *
343 * In case the bias tensor is provided, the final result is:
344 *
345 * ((FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier)) >> result_shift) + result_offset_after_shift
346 *
347 * This function calls the following NEON kernels:
348 *
349 * -# @ref CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel
350 *
351 * @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
352 * after the result is shifted right by result_shift
353*/
354class CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint : public ICLSimpleFunction
355{
356public:
357 /** Initialise the kernel's inputs, output
358 *
359 * @param[in] input Input tensor. Data type supported: S32
360 * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
361 * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000362 * @param[out] output Output tensor. Data type supported: QSYMM16
Manuel Bottini9c9b70b2019-07-01 17:35:56 +0100363 * @param[in] result_fixedpoint_multiplier Fixed point value to be multiplied to each element of the input matrix when once the result_offset has been add
364 * @param[in] result_shift Number of bits to shift right the result after the fixed point multiplication
Giorgio Arena1856ff72020-02-07 13:46:45 +0000365 * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QSYMM16. Defaults to the minimum possible 32-bit signed integer.
Manuel Bottini9c9b70b2019-07-01 17:35:56 +0100366 * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QSYMM16.
Giorgio Arena1856ff72020-02-07 13:46:45 +0000367 * Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
Manuel Bottini9c9b70b2019-07-01 17:35:56 +0100368 */
Giorgio Arena1856ff72020-02-07 13:46:45 +0000369 void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int min = std::numeric_limits<int32_t>::lowest(),
370 int max = std::numeric_limits<int32_t>::max());
Manuel Bottini2b84be52020-04-08 10:15:51 +0100371 /** Initialise the kernel's inputs, output
372 *
373 * @param[in] compile_context The compile context to be used.
374 * @param[in] input Input tensor. Data type supported: S32
375 * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
376 * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
377 * @param[out] output Output tensor. Data type supported: QSYMM16
378 * @param[in] result_fixedpoint_multiplier Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
379 * @param[in] result_shift Number of bits to shift right the result after the fixed point multiplication
380 * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QSYMM16. Defaults to the minimum possible 32-bit signed integer.
381 * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QSYMM16.
382 * Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
383 */
384 void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift,
385 int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
Manuel Bottini9c9b70b2019-07-01 17:35:56 +0100386 /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint
387 *
388 * @param[in] input Input tensor info. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
389 * @param[in] bias Biases tensor info. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
390 * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000391 * @param[in] output Output tensor info. Data type supported: QSYMM16
Giorgio Arena1856ff72020-02-07 13:46:45 +0000392 * @param[in] min (Optional) Min value used to saturate down the output result before converting back to QSYMM16. Defaults to the minimum possible 32-bit signed integer.
Manuel Bottini9c9b70b2019-07-01 17:35:56 +0100393 * @param[in] max (Optional) Max value used to saturate up the output result before converting back to QSYMM16.
Giorgio Arena1856ff72020-02-07 13:46:45 +0000394 * Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
Manuel Bottini9c9b70b2019-07-01 17:35:56 +0100395 *
396 * @return a status
397 */
Giorgio Arena1856ff72020-02-07 13:46:45 +0000398 static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
Manuel Bottini9c9b70b2019-07-01 17:35:56 +0100399};
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000400/** Basic function to execute GEMMLowpQuantizeDown kernels on CL.
401 *
402 * This function calls the following CL kernels:
403 *
Luca Foschiani689c9682020-02-26 14:30:14 +0000404 * -# @ref CLGEMMLowpQuantizeDownInt32ScaleKernel
Sheri Zhang1b14c752020-03-09 14:29:52 +0000405 * -# @ref CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000406 * -# @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPointKernel
407 * -# @ref CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPointKernel
Michele Di Giorgio1c1b3aa2020-04-02 17:35:42 +0100408 * -# @ref CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPointKernel
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000409*/
410class CLGEMMLowpOutputStage : public ICLSimpleFunction
411{
412public:
413 /** Initialise the kernel's inputs, output
414 *
415 * @param[in] input Input tensor. Data type supported: S32
416 * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
417 * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
418 * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
419 * @param[in] info GEMMLowp output stage metadata.
420 */
421 void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo &info);
Manuel Bottini2b84be52020-04-08 10:15:51 +0100422 /** Initialise the kernel's inputs, output
423 *
424 * @param[in] compile_context The compile context to be used.
425 * @param[in] input Input tensor. Data type supported: S32
426 * @param[in] bias Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
427 * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
428 * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
429 * @param[in] info GEMMLowp output stage metadata.
430 */
431 void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo &info);
Sheri Zhang0cdbda52020-02-25 15:57:21 +0000432 /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpOutputStage
433 *
434 * @param[in] input Input tensor info. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
435 * @param[in] bias Biases tensor info. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
436 * Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
437 * @param[in] output Output tensor info. Data type supported: QASYMM8/QASYMM8_SIGNED
438 * @param[in] info GEMMLowp output stage metadata.
439 *
440 * @return a status
441 */
442 static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo &info);
443};
Georgios Pinitas932491f2018-09-21 16:33:15 +0100444} // namespace arm_compute
Michalis Spyrouf4643372019-11-29 16:17:13 +0000445#endif /*ARM_COMPUTE_CLGEMMLOWPOUTPUTSTAGE_H */