/*
 * Copyright (c) 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEBYFLOATKERNEL_H__
#define __ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEBYFLOATKERNEL_H__

#include "arm_compute/core/CL/ICLKernel.h"

namespace arm_compute
{
// Forward declarations
class ICLTensor;

/** OpenCL kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8
 *
 * This kernel takes a final int32 accumulator value (the output of @ref CLGEMMLowpMatrixMultiplyKernel) and processes it to obtain the final QASYMM8 value.
 * The following computations will be performed by the kernel:
 *
 *  -# Add bias to the final result if the bias tensor is not a nullptr
 *  -# Requantize: multiply each entry of the result by the float @p multiplier
 *  -# Add the @p offset to each result
 *  -# Clamp the value between the specified min and max bounds
 *  -# Clamp the resulting int32 values to the [0..255] range and cast to QASYMM8.
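 *
 * A scalar sketch of the per-element arithmetic, assuming round-to-nearest for the
 * float-to-integer conversion (the actual OpenCL kernel performs this computation vectorized):
 *
 * @code
 * int32_t acc = input[i] + ((bias != nullptr) ? bias[i] : 0);                   // optional bias addition
 * int32_t res = static_cast<int32_t>(std::lround(acc * multiplier)) + offset;   // requantize and add offset
 * res         = std::max(min, std::min(max, res));                              // clamp to the user-supplied bounds
 * output[i]   = static_cast<uint8_t>(std::max(0, std::min(255, res)));          // clamp to [0..255] and cast
 * @endcode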
 */
class CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel : public ICLKernel
{
public:
    /** Constructor */
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel(const CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel &operator=(const CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel &operator=(CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel &&) = default;
    /** Initialise the kernel's input and output.
     *
     * @param[in]  input           Input tensor. Data type supported: S32
     * @param[in]  bias            Biases tensor. Only shared biases are supported and it can be a nullptr if the addition of biases is not required.
     *                             Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output          Output tensor. Data type supported: QASYMM8
     * @param[in]  multiplier      Float multiplier to be applied to each element of the input matrix
     * @param[in]  offset          Offset to be added to each result before converting it back to QASYMM8
     * @param[in]  min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
     * @param[in]  max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8.
     *                             Along with @p min, this value can be used to implement "rectified linear unit" activation functions
     * @param[in]  output_3d_depth (Optional) Depth of output in 3D (Defaults to 1)
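     *
     * A minimal usage sketch; the tensor objects and the scale/offset values below are
     * illustrative placeholders, not values mandated by this API:
     *
     * @code
     * CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel quantize_down;
     * // gemm_out holds the S32 accumulators, bias is an optional 1D S32 tensor of shape [OFM], q_out is the QASYMM8 result
     * quantize_down.configure(&gemm_out, &bias, &q_out, 0.0036f, 128, 0, 255);
     * @endcode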
     */
    void configure(const ICLTensor *input, const ICLTensor *bias, ICLTensor *output, float multiplier, int offset,
                   int min = 0, int max = 0, unsigned int output_3d_depth = 1);
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel
     *
     * @param[in] input           Input tensor. Data type supported: S32
     * @param[in] bias            Biases tensor. Only shared biases are supported and it can be a nullptr if the addition of biases is not required.
     *                            Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output          Output tensor. Data type supported: QASYMM8
     * @param[in] min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
     * @param[in] max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8.
     *                            Along with @p min, this value can be used to implement "rectified linear unit" activation functions
     * @param[in] output_3d_depth (Optional) Depth of output in 3D (Defaults to 1)
     *
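     * A typical call, made before configure() to reject an unsupported combination of tensors
     * up front (the tensor names are illustrative):
     *
     * @code
     * Status status = CLGEMMLowpQuantizeDownInt32ToUint8ScaleByFloatKernel::validate(gemm_out.info(), bias.info(), q_out.info());
     * @endcode
     *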
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output,
                           int min = 0, int max = 0, unsigned int output_3d_depth = 1);

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    const ICLTensor *_input;
    const ICLTensor *_bias;
    ICLTensor       *_output;
    bool             _reinterpret_as_3d;
};
} // namespace arm_compute
#endif /* __ARM_COMPUTE_CLGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEBYFLOATKERNEL_H__ */