/*
 * Copyright (c) 2020 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEBYFLOATKERNEL_H
#define ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEBYFLOATKERNEL_H

#include "arm_compute/core/CL/ICLKernel.h"

namespace arm_compute
{
// Forward declarations
class ICLTensor;

/** OpenCL kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8/QASYMM8_SIGNED
 *
 * This kernel takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8/QASYMM8_SIGNED value.
 * The following computations will be performed by the kernel:
 *
 *  -# Add the bias to the accumulator if the bias tensor is not a nullptr
 *  -# Multiply each entry of the result by the floating-point result multiplier
 *  -# Add the quantization offset to each result
 *  -# Round to the nearest integer
 *  -# Clamp the value between the specified min and max bounds
 *  -# Saturate the resulting int32 values:
 *     - to the [0..255] range and cast to QASYMM8.
 *     - to the [-128..127] range and cast to QASYMM8_SIGNED.
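 *
 * A rough scalar sketch of the per-element arithmetic (illustrative names, not
 * the kernel's actual implementation; the multiplier, offset and bounds come
 * from the @ref GEMMLowpOutputStageInfo passed at configure time):
 *
 * @code
 * int32_t acc = *in;                                                           // int32 GEMM accumulator
 * if(bias != nullptr)
 * {
 *     acc += *bias;                                                           // optional shared bias
 * }
 * const float requant  = static_cast<float>(acc) * result_multiplier + offset; // scale by float, add offset
 * int32_t     rounded  = static_cast<int32_t>(std::round(requant));
 * rounded              = std::max(min_bound, std::min(max_bound, rounded));    // user-specified bounds
 * const uint8_t out_q8 = static_cast<uint8_t>(std::max(0, std::min(255, rounded))); // QASYMM8 path
 * @endcode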
 */
class CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel : public ICLKernel
{
public:
    /** Constructor */
    CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel(const CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel &operator=(const CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel(CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel &operator=(CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel &&) = default;
    /** Initialise the kernel's inputs and output.
     *
     * @param[in]  input  Input tensor. Data type supported: S32
     * @param[in]  bias   Biases tensor. Only shared biases are supported; it can be a nullptr if the bias addition is not required.
     *                    Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in]  info   Output stage info. Used to pass the quantized output data type
     */
    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *info);
    /** Initialise the kernel's inputs and output.
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input           Input tensor. Data type supported: S32
     * @param[in]  bias            Biases tensor. Only shared biases are supported; it can be a nullptr if the bias addition is not required.
     *                             Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output          Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in]  info            Output stage info. Used to pass the quantized output data type
     */
    void configure(CLCompileContext &compile_context, const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, const GEMMLowpOutputStageInfo *info);
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel
     *
     * @param[in] input  Input tensor. Data type supported: S32
     * @param[in] bias   Biases tensor. Only shared biases are supported; it can be a nullptr if the bias addition is not required.
     *                   Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output Output tensor. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in] info   Output stage info. Used to pass the quantized output data type
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, const GEMMLowpOutputStageInfo *info);
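
    // A hedged usage sketch (not part of the library's documentation): callers
    // typically validate the tensor infos first, then configure the kernel. The
    // tensors and the GEMMLowpOutputStageInfo fields below are assumed to be set
    // up elsewhere; the field values shown are purely illustrative.
    //
    //     GEMMLowpOutputStageInfo info{};
    //     info.type                     = GEMMLowpOutputStageType::QUANTIZE_DOWN_FLOAT;
    //     info.output_data_type         = DataType::QASYMM8;
    //     info.gemmlowp_real_multiplier = 0.0025f; // float requantization scale
    //     info.gemmlowp_offset          = 10;      // output quantization offset
    //     ARM_COMPUTE_ERROR_THROW_ON(CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel::validate(
    //         input.info(), bias.info(), output.info(), &info));
    //     CLGEMMLowpQuantizeDownInt32ScaleByFloatKernel kernel;
    //     kernel.configure(&input, &bias, &output, &info);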

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    const ICLTensor *_input;
    const ICLTensor *_bias;
    ICLTensor       *_output;
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32SCALEBYFLOATKERNEL_H */