/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEKERNEL_H__
#define __ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEKERNEL_H__

#include "arm_compute/core/NEON/INEKernel.h"

namespace arm_compute
{
class ITensor;

/** NEON kernel used to quantize down the int32 accumulator values of GEMMLowp to QASYMM8
 *
 * This kernel takes a final int32 accumulator value (the output of @ref NEGEMMLowpMatrixMultiplyKernel), and processes it to obtain the final QASYMM8 value.
 * The following computations will be performed by the kernel:
 *
 *  -# Add offset terms to final result
 *  -# Multiply each entry of result by result_mult_int
 *  -# Add bias to final result if bias tensor is not a nullptr
 *  -# Shift the int32 accumulator by result_shift
 *  -# Clamp the value between the specified min and max bounds
 *  -# Clamp the resulting int32 values to the [0..255] range and cast to QASYMM8.
 *
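 * As a purely illustrative scalar sketch of the steps above (the real kernel is NEON-vectorized;
 * the element index x and the array names are hypothetical, not part of the API):
 *
 * @code
 * int32_t acc = in[x] + result_offset;     // add the offset term
 * acc *= result_mult_int;                  // multiply by result_mult_int
 * if(bias != nullptr)
 * {
 *     acc += bias[x];                      // add the (shared) bias
 * }
 * acc >>= result_shift;                    // shift right by result_shift
 * acc = std::min(std::max(acc, min), max); // clamp to [min, max] when a bounded ReLU is requested
 * out[x] = static_cast<uint8_t>(std::min(std::max(acc, 0), 255)); // clamp to [0..255] and cast to QASYMM8
 * @endcode
 *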
 */
class NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel : public INEKernel
{
public:
    const char *name() const override
    {
        return "NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel";
    }
    /** Constructor */
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel(const NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &operator=(const NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &) = delete;
    /** Allow instances of this class to be moved */
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel(NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &&) = default;
    /** Allow instances of this class to be moved */
    NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &operator=(NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel &&) = default;
    /** Initialise the kernel's input and output.
     *
     * @param[in]  input           Input tensor. Data type supported: S32
     * @param[in]  bias            Biases tensor. Only shared biases are supported and it can be a nullptr if the bias addition is not required.
     *                             Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[out] output          Output tensor. Data type supported: QASYMM8
     * @param[in]  result_offset   Offset to be added to each element of the input matrix
     * @param[in]  result_mult_int Value each element of the input matrix is multiplied by once the result_offset has been added
     * @param[in]  result_shift    Number of bits to shift right the result before converting back to QASYMM8
     * @param[in]  min             (Optional) Min value used to saturate down the output result before converting back to QASYMM8
     * @param[in]  max             (Optional) Max value used to saturate up the output result before converting back to QASYMM8.
     *                             Along with @p min, this value can be used to implement a "rectified linear unit" activation function
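     *
     * A possible call, for illustration only (the tensor names are hypothetical; the
     * offset/multiplier/shift values are arbitrary and the min/max pair implements a
     * [0..255] bounded ReLU):
     *
     * @code
     * NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel quantize_down_kernel;
     * quantize_down_kernel.configure(&gemm_output_s32, &biases_s32, &output_qasymm8, -2, 3, 8, 0, 255);
     * @endcode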
     */
    void configure(const ITensor *input, const ITensor *bias, ITensor *output, int result_offset, int result_mult_int, int result_shift, int min = 0, int max = 0);
    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel
     *
     * @param[in] input  Input tensor. Data type supported: S32
     * @param[in] bias   Biases tensor. Only shared biases are supported and it can be a nullptr if the bias addition is not required.
     *                   Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p input.
     * @param[in] output Output tensor. Data type supported: QASYMM8
     * @param[in] min    (Optional) Min value used to saturate down the output result before converting back to QASYMM8
     * @param[in] max    (Optional) Max value used to saturate up the output result before converting back to QASYMM8.
     *                   Along with @p min, this value can be used to implement a "rectified linear unit" activation function
     *
     * @return a status
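     *
     * Typically called before configure(), e.g. (a sketch; the tensor objects are hypothetical):
     *
     * @code
     * const Status status = NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::validate(gemm_output_s32.info(), biases_s32.info(), output_qasymm8.info(), 0, 255);
     * if(status.error_code() != ErrorCode::OK)
     * {
     *     // Handle the invalid configuration
     * }
     * @endcode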
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *bias, const ITensorInfo *output, int min = 0, int max = 0);

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;

private:
    /** Template function to run the NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel
     *
     * @param[in] window Region on which to execute the kernel (Must be a valid region of the window returned by window()).
     */
    template <bool is_bounded_relu>
    void run(const Window &window);

    /** Common signature for all the specialised NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel functions
     *
     * @param[in] window Region on which to execute the kernel.
     */
    using QuantizeDownFunctionPtr = void (NEGEMMLowpQuantizeDownInt32ToUint8ScaleKernel::*)(const Window &window);

    QuantizeDownFunctionPtr _func;
    const ITensor          *_input;
    const ITensor          *_bias;
    ITensor                *_output;
    int                     _result_offset;
    int                     _result_mult_int;
    int                     _result_shift;
    int                     _min;
    int                     _max;
};
} // namespace arm_compute

#endif /* __ARM_COMPUTE_NEGEMMLOWPQUANTIZEDOWNINT32TOUINT8SCALEKERNEL_H__ */