blob: a4edab9b8f2ab5de89beca834461be2a26073d0d [file] [log] [blame]
/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
Manuel Bottini1f332d42019-11-29 17:25:25 +000024#ifndef ARM_COMPUTE_CLGEMMLOWPOUTPUTSTAGE_H
25#define ARM_COMPUTE_CLGEMMLOWPOUTPUTSTAGE_H
Gian Marco05288a22017-11-21 10:57:50 +000026
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010027#include "arm_compute/core/Error.h"
Gian Marco05288a22017-11-21 10:57:50 +000028#include "arm_compute/runtime/CL/ICLSimpleFunction.h"
29
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010030#include <limits>
31
/** This file contains all available output stages for GEMMLowp on OpenCL.
 *
 * In gemmlowp, the "output stage" is the process that takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyCore),
 * and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
 *
 * More information about the GEMMLowp output stage can be found at https://github.com/google/gemmlowp/blob/master/doc/output.md
 */
39
40namespace arm_compute
41{
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010042class CLCompileContext;
Gian Marco05288a22017-11-21 10:57:50 +000043class ITensor;
Sang-Hoon Parkbef7fa22020-10-21 15:58:54 +010044class ICLTensor;
45class ITensorInfo;
46struct GEMMLowpOutputStageInfo;
Gian Marco05288a22017-11-21 10:57:50 +000047
/** Basic function to execute CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint on OpenCL.
 *
 * CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint depends on 3 parameters:
 *
 * result_fixedpoint_multiplier, result_shift, result_offset_after_shift
 *
 * The final result is:
 *
 * (FixedPointMul(input[i][k], result_fixedpoint_multiplier) >> result_shift) + result_offset_after_shift
 *
 * where FixedPointMul(x, y) is the nearest integer to the following
 * mathematical expression, evaluated without overflow or intermediate rounding:
 *
 * (x * y) / 2^31
 *
 * For more information: https://github.com/google/gemmlowp/blob/master/public/output_stages.h#L68
 *
 * In case the bias tensor is provided, the final result is:
 *
 * ((FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier)) >> result_shift) + result_offset_after_shift
 *
 * This function calls the following OpenCL kernels:
 *
 * -# @ref CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel
 *
 * @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
 *       after the result is shifted right by result_shift
 */
class CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint : public ICLSimpleFunction
{
public:
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  input                        Input tensor. Data type supported: S32
     * @param[in]  bias                         Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                                          Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output                       Output tensor. Data type supported: QASYMM8
     * @param[in]  result_fixedpoint_multiplier Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
     * @param[in]  result_shift                 Number of bits to shift right the result after the fixed point multiplication
     * @param[in]  result_offset_after_shift    Offset to be applied to result before converting it back to QASYMM8
     * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QASYMM8. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QASYMM8.
     *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
                   int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  compile_context              The compile context to be used.
     * @param[in]  input                        Input tensor. Data type supported: S32
     * @param[in]  bias                         Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                                          Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output                       Output tensor. Data type supported: QASYMM8
     * @param[in]  result_fixedpoint_multiplier Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
     * @param[in]  result_shift                 Number of bits to shift right the result after the fixed point multiplication
     * @param[in]  result_offset_after_shift    Offset to be applied to result before converting it back to QASYMM8
     * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QASYMM8. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QASYMM8.
     *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift,
                   int result_offset_after_shift,
                   int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFixedPoint
     *
     * @param[in] input  Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
     * @param[in] bias   Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output Output tensor. Data type supported: QASYMM8
     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QASYMM8. Defaults to the minimum possible 32-bit signed integer.
     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QASYMM8.
     *                   Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
};
Georgios Pinitas51e53a32018-10-22 13:49:08 +0100125
/** Basic function to execute CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint on OpenCL.
 *
 * CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint depends on 3 parameters:
 *
 * result_fixedpoint_multiplier, result_shift, result_offset_after_shift
 *
 * The final result is:
 *
 * (FixedPointMul(input[i][k], result_fixedpoint_multiplier) >> result_shift) + result_offset_after_shift
 *
 * where FixedPointMul(x, y) is the nearest integer to the following
 * mathematical expression, evaluated without overflow or intermediate rounding:
 *
 * (x * y) / 2^31
 *
 * For more information: https://github.com/google/gemmlowp/blob/master/public/output_stages.h#L68
 *
 * In case the bias tensor is provided, the final result is:
 *
 * ((FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier)) >> result_shift) + result_offset_after_shift
 *
 * This function calls the following OpenCL kernels:
 *
 * -# @ref CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel
 *
 * @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
 *       after the result is shifted right by result_shift
 */
class CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint : public ICLSimpleFunction
{
public:
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  input                        Input tensor. Data type supported: S32
     * @param[in]  bias                         Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                                          Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output                       Output tensor. Data type supported: QASYMM8_SIGNED
     * @param[in]  result_fixedpoint_multiplier Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
     * @param[in]  result_shift                 Number of bits to shift right the result after the fixed point multiplication
     * @param[in]  result_offset_after_shift    Offset to be applied to result before converting it back to QASYMM8_SIGNED
     * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QASYMM8_SIGNED. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QASYMM8_SIGNED.
     *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int result_offset_after_shift,
                   int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  compile_context              The compile context to be used.
     * @param[in]  input                        Input tensor. Data type supported: S32
     * @param[in]  bias                         Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                                          Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output                       Output tensor. Data type supported: QASYMM8_SIGNED
     * @param[in]  result_fixedpoint_multiplier Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
     * @param[in]  result_shift                 Number of bits to shift right the result after the fixed point multiplication
     * @param[in]  result_offset_after_shift    Offset to be applied to result before converting it back to QASYMM8_SIGNED
     * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QASYMM8_SIGNED. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QASYMM8_SIGNED.
     *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift,
                   int result_offset_after_shift,
                   int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToInt8ScaleByFixedPoint
     *
     * @param[in] input  Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
     * @param[in] bias   Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output Output tensor. Data type supported: QASYMM8_SIGNED
     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QASYMM8_SIGNED. Defaults to the minimum possible 32-bit signed integer.
     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QASYMM8_SIGNED.
     *                   Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
};
203
/** Basic function to execute CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint on OpenCL.
 *
 * CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint depends on 2 parameters:
 *
 * result_fixedpoint_multiplier, result_shift
 *
 * The final result is:
 *
 * (FixedPointMul(input[i][k], result_fixedpoint_multiplier) >> result_shift)
 *
 * where FixedPointMul(x, y) is the nearest integer to the following
 * mathematical expression, evaluated without overflow or intermediate rounding:
 *
 * (x * y) / 2^31
 *
 * For more information: https://github.com/google/gemmlowp/blob/master/public/output_stages.h#L68
 *
 * In case the bias tensor is provided, the final result is:
 *
 * ((FixedPointMul(input[i][k] + bias[k], result_fixedpoint_multiplier)) >> result_shift) + result_offset_after_shift
 *
 * This function calls the following OpenCL kernels:
 *
 * -# @ref CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel
 *
 * @note The function accepts also 2 optional input arguments (min and max) which can be used to implement "rectified linear unit" activation functions
 *       after the result is shifted right by result_shift
 */
class CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint : public ICLSimpleFunction
{
public:
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  input                        Input tensor. Data type supported: S32
     * @param[in]  bias                         Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                                          Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output                       Output tensor. Data type supported: QSYMM16
     * @param[in]  result_fixedpoint_multiplier Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
     * @param[in]  result_shift                 Number of bits to shift right the result after the fixed point multiplication
     * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QSYMM16. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QSYMM16.
     *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift, int min = std::numeric_limits<int32_t>::lowest(),
                   int max = std::numeric_limits<int32_t>::max());
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  compile_context              The compile context to be used.
     * @param[in]  input                        Input tensor. Data type supported: S32
     * @param[in]  bias                         Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                                          Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output                       Output tensor. Data type supported: QSYMM16
     * @param[in]  result_fixedpoint_multiplier Fixed point value to be multiplied to each element of the input matrix once the result_offset has been added
     * @param[in]  result_shift                 Number of bits to shift right the result after the fixed point multiplication
     * @param[in]  min                          (Optional) Min value used to saturate down the output result before converting back to QSYMM16. Defaults to the minimum possible 32-bit signed integer.
     * @param[in]  max                          (Optional) Max value used to saturate up the output result before converting back to QSYMM16.
     *                                          Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, int result_fixedpoint_multiplier, int result_shift,
                   int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToInt16ScaleByFixedPoint
     *
     * @param[in] input  Input tensor info. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
     * @param[in] bias   Biases tensor info. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output Output tensor info. Data type supported: QSYMM16
     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QSYMM16. Defaults to the minimum possible 32-bit signed integer.
     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QSYMM16.
     *                   Along with @p min, this value can be used to implement "rectified linear unit" activation functions. Defaults to the maximum possible 32-bit signed integer.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = std::numeric_limits<int32_t>::lowest(), int max = std::numeric_limits<int32_t>::max());
};
/** Basic function to execute GEMMLowpQuantizeDown kernels on CL.
 *
 * This is the generic output stage: the concrete kernel is selected at configure time
 * from the @ref GEMMLowpOutputStageInfo metadata.
 *
 * This function calls the following CL kernels:
 *
 * -# @ref CLGEMMLowpQuantizeDownInt32ScaleKernel
 * -# @ref CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel
 * -# @ref CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel
 */
class CLGEMMLowpOutputStage : public ICLSimpleFunction
{
public:
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  input  Input tensor. Data type supported: S32
     * @param[in]  bias   Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                    Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in]  info   GEMMLowp output stage metadata.
     */
    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo &info);
    /** Initialise the kernel's inputs, output
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           Input tensor. Data type supported: S32
     * @param[in]  bias            Biases tensor. Only shared biases supported and it can be a nullptr if the biases addition is not required.
     *                             Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output          Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in]  info            GEMMLowp output stage metadata.
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo &info);
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ScaleByFixedPointKernel
     *
     * @param[in] input  Input tensor. It is the output of @ref CLGEMMLowpMatrixMultiplyCore function. Data type supported: S32
     * @param[in] bias   Biases tensor. Only shared biases supported and it can be a nullptr if the addition of biases is not required.
     *                   Biases are 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in] info   GEMMLowp output stage metadata.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo &info);
};
Georgios Pinitas932491f2018-09-21 16:33:15 +0100320} // namespace arm_compute
Sang-Hoon Parka45abfd2020-08-17 13:50:15 +0100321#endif /*ARM_COMPUTE_CLGEMMLOWPOUTPUTSTAGE_H */