/*
 * Copyright (c) 2017 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
24#ifndef __ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALE_H__
25#define __ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALE_H__
26
27#include "arm_compute/core/NEON/INEKernel.h"
28
29namespace arm_compute
30{
31class ITensor;
32
/** NEON kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8
 *
 * This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8 value.
 * The following computations will be performed by the kernel:
 *
 * -# Add offset terms to final result
 * -# Multiply each entry of result and round to nearest integer
 * -# Clamp the resulting int32 values to the [0..255] range and cast to QASYMM8.
 */
class NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel : public INEKernel
{
public:
    /** Constructor */
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel(const NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &operator=(const NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel(NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &operator=(NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &&) = default;
    /** Initialise the kernel's input and output.
     *
     * @param[in]  input           Input tensor. Data type supported: S32
     * @param[out] output          Output tensor. Data type supported: QASYMM8
     * @param[in]  result_offset   Offset to be added to each element of the input matrix
     * @param[in]  result_mult_int Value to be multiplied to each element of the input matrix once the result_offset has been added
     * @param[in]  result_shift    Number of bits to shift right the result before converting back to QASYMM8
     */
    void configure(const ITensor *input, ITensor *output, int result_offset, int result_mult_int, int result_shift);

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;

private:
    const ITensor *_input;            // Source tensor (S32 accumulators); not owned by the kernel
    ITensor       *_output;           // Destination tensor (QASYMM8); not owned by the kernel
    int32_t        _result_offset;    // Offset added to each input element before scaling
    int32_t        _result_mult_int;  // Multiplier applied after the offset has been added
    int32_t        _result_shift;     // Right-shift applied before clamping/casting to QASYMM8
};
76} // namespace arm_compute
77
78#endif /* __ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALE_H__ */