/*
 * Copyright (c) 2020-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEQLSTMLAYERNORMALIZATIONKERNEL_H
#define ARM_COMPUTE_NEQLSTMLAYERNORMALIZATIONKERNEL_H

#include "src/core/NEON/INEKernel.h"

#include <functional>

namespace arm_compute
{
class ITensor;

/** Kernel to perform layer normalization for QLSTM. */
class NEQLSTMLayerNormalizationKernel : public INEKernel
{
public:
    const char *name() const override
    {
        return "NEQLSTMLayerNormalizationKernel";
    }
    /** Default constructor */
    NEQLSTMLayerNormalizationKernel() = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEQLSTMLayerNormalizationKernel(const NEQLSTMLayerNormalizationKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEQLSTMLayerNormalizationKernel &operator=(const NEQLSTMLayerNormalizationKernel &) = delete;
    /** Default move constructor */
    NEQLSTMLayerNormalizationKernel(NEQLSTMLayerNormalizationKernel &&) = default;
    /** Default move assignment operator */
    NEQLSTMLayerNormalizationKernel &operator=(NEQLSTMLayerNormalizationKernel &&) = default;
    /** Default destructor */
    ~NEQLSTMLayerNormalizationKernel() = default;

    /** Set the input and output tensors.
     *
     * @param[in]  input  Source tensor. Data types supported: QSYMM16.
     * @param[out] output Destination tensor. Data types supported: Same as @p input.
     * @param[in]  weight Weight tensor. Data types supported: Same as @p input.
     * @param[in]  bias   Bias tensor. Data types supported: S32
     */
    void configure(const ITensor *input, ITensor *output, const ITensor *weight, const ITensor *bias);
    /** Static function to check if given info will lead to a valid configuration of @ref NEQLSTMLayerNormalizationKernel
     *
     * @param[in] input  Source tensor info. Data types supported: QSYMM16.
     * @param[in] output Destination tensor info. Data types supported: Same as @p input.
     * @param[in] weight Weight tensor info. Data types supported: Same as @p input.
     * @param[in] bias   Bias tensor info. Data types supported: S32
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *weight, const ITensorInfo *bias);
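
    // A minimal usage sketch (illustrative only: the tensor objects, their shapes and the chosen
    // scheduling dimension are assumptions for this example, not taken from the library's tests):
    //
    //   Tensor input, output, weight, bias; // QSYMM16 input/output/weight, S32 bias
    //
    //   Status s = NEQLSTMLayerNormalizationKernel::validate(input.info(), output.info(),
    //                                                        weight.info(), bias.info());
    //   ARM_COMPUTE_ERROR_THROW_ON(s);
    //
    //   NEQLSTMLayerNormalizationKernel norm_kernel;
    //   norm_kernel.configure(&input, &output, &weight, &bias);
    //   NEScheduler::get().schedule(&norm_kernel, Window::DimY);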

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;

private:
    // constants
    static constexpr uint32_t max_input_dimension{ 2 };  /**< The maximum input dimension supported */
    static constexpr uint32_t max_weight_dimension{ 1 }; /**< The maximum weight dimension supported */
    static constexpr uint32_t max_bias_dimension{ 1 };   /**< The maximum bias dimension supported */
    static constexpr uint32_t vector_size_byte{ 16 };    /**< Computation vector size in bytes */

    using ComputeFuncType = std::function<void(NEQLSTMLayerNormalizationKernel &)>;

    ComputeFuncType _fn{}; /**< Function pointer to computation function */

    const ITensor *_input{ nullptr };  /**< Input tensor */
    const ITensor *_weight{ nullptr }; /**< Weight tensor */
    const ITensor *_bias{ nullptr };   /**< Bias tensor */
    ITensor       *_output{ nullptr }; /**< Output tensor */

    int32_t _output_multiplier{}; /**< Multiplier for output values */
    int32_t _output_shift{};      /**< Shift value for output values */

    int32_t _window_start_x{}; /**< The beginning of x-axis iteration */
    int32_t _window_end_x{};   /**< The end of x-axis iteration */
    int32_t _window_step_x{};  /**< The size of x-axis iteration's step */

    Window _inout_window{};  /**< Window for input and output tensor */
    Window _weight_window{}; /**< Window for weight and bias tensor */

    /** Function to configure initial windows for destination of computation
     *
     * @param[in] target Destination tensor to use for the output window
     *
     * @return configured window
     */
    Window configure_window(ITensor *target);
    /** Function to compute for data type QSYMM16 */
    void compute_qsymm16();
    /** Function to compute the summation and the summation of squares of the given input
     *
     * @param[in] input_ptr Pointer to input array
     *
     * @return Pair of the summation and the summation of squares
     */
    std::pair<int64_t, int64_t> sum_qsymm16(const int16_t *input_ptr);
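    // A scalar sketch of the reduction sum_qsymm16() is expected to perform over one row
    // (illustrative only; the actual kernel may process the row with NEON vectors):
    //
    //   int64_t sum = 0, sum_sq = 0;
    //   for(int32_t x = _window_start_x; x < _window_end_x; ++x)
    //   {
    //       const int64_t val = input_ptr[x];
    //       sum += val;
    //       sum_sq += val * val;
    //   }
    //   return std::make_pair(sum, sum_sq);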
    /** Function to normalize values using the computed mean and standard deviation
     *
     * @param[in]  input_ptr     Pointer to input array
     * @param[out] output_ptr    Pointer to output array
     * @param[in]  weight_ptr    Pointer to weight array
     * @param[in]  bias_ptr      Pointer to bias array
     * @param[in]  mean          Mean value
     * @param[in]  inv_std_mul   Quantized multiplier for the inverse standard deviation
     * @param[in]  inv_std_shift Shift for the inverse standard deviation
     */
    void normalize_qasymm16(const int16_t *input_ptr,
                            int16_t       *output_ptr,
                            const int16_t *weight_ptr,
                            const int32_t *bias_ptr,
                            int32_t mean, int32_t inv_std_mul, int32_t inv_std_shift);
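    // The per-row arithmetic behind normalize_qasymm16() follows standard layer normalization,
    // with the inverse standard deviation and the output rescaling both expressed as quantized
    // multiplier/shift pairs (a rough sketch inferred from this interface, not the exact kernel code):
    //
    //   mean     = sum / num_elements
    //   variance = sum_sq / num_elements - mean * mean
    //   inv_std  ~ (inv_std_mul, inv_std_shift)   // fixed-point approximation of 1 / sqrt(variance)
    //   out[x]   = requantize((in[x] - mean) * inv_std * weight[x] + bias[x],
    //                         _output_multiplier, _output_shift)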
    /** Function to compute output quantization information */
    QuantizationInfo compute_output_qinfo();
};
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEQLSTMLAYERNORMALIZATIONKERNEL_H */