/*
 * Copyright (c) 2019-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_OUTPUTSTAGE_KERNEL_H
#define ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_OUTPUTSTAGE_KERNEL_H

#include "arm_compute/core/KernelDescriptors.h"

#include "src/core/common/Macros.h"
#include "src/cpu/ICpuKernel.h"

namespace arm_compute
{
namespace cpu
{
namespace kernels
{
/** Kernel used to add the offset contribution and perform the output stage after @ref CpuGemmLowpMatrixMultiplyKernel.
 *
 * The computation is performed in-place.
 *
 * This kernel takes a final int32 accumulator value (the output of @ref CpuGemmLowpMatrixMultiplyKernel),
 * and adds to it the offset contribution of matrix A and matrix B in-place.
 *
 * The output stage can perform either QuantizeDownInt32ToUint8Scale or QuantizeDownInt32ToUint8ScaleByFixedPoint for Uint8.
 * The output stage can perform either QuantizeDownInt32ToInt8Scale or QuantizeDownInt32ToInt8ScaleByFixedPoint for Int8.
 *
 * For QuantizeDownInt32ToUint8Scale/QuantizeDownInt32ToInt8Scale the final result is:
 *
 *            ((mm_result'[i][k] + result_offset) * result_mult_int) >> result_shift
 *
 * For QuantizeDownInt32ToUint8ScaleByFixedPoint/QuantizeDownInt32ToInt8ScaleByFixedPoint the final result is:
 *
 *            (FixedPointMul(mm_result'[i][k], result_fixedpoint_multiplier) >> result_shift) + result_offset_after_shift
 *
 * where FixedPointMul(x, y) is the nearest integer to the following
 * mathematical expression, evaluated without overflow or intermediate rounding:
 *
 *            (x * y) / 2^31
 *
 * and mm_result'[i][k] = mm_result[i][k] +
 *                        (vector_sum_col[k] * a_offset) +
 *                        (vector_sum_row[i] * b_offset) +
 *                        (a_offset * b_offset * k)
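 *
 * As an illustration only (this is not the kernel's actual implementation; the names follow the
 * formulas above, the optional bias term is assumed to be present, and rounding/saturation details
 * are simplified), the per-element computation for the fixed-point path can be sketched as:
 *
 * @code{.cpp}
 * // Offset contribution: fold the a_offset/b_offset terms into the int32 accumulator.
 * int32_t acc = mm_result[i][k]
 *             + vector_sum_col[k] * a_offset
 *             + vector_sum_row[i] * b_offset
 *             + a_offset * b_offset * k;
 * acc += bias[k]; // only when a bias tensor is provided
 *
 * // Output stage, following the documented fixed-point formula:
 * // (FixedPointMul(acc, result_fixedpoint_multiplier) >> result_shift) + result_offset_after_shift
 * const int32_t mul  = static_cast<int32_t>((static_cast<int64_t>(acc) * result_fixedpoint_multiplier + (1LL << 30)) >> 31);
 * const int32_t down = (mul >> result_shift) + result_offset_after_shift;
 * const uint8_t out  = static_cast<uint8_t>(down < 0 ? 0 : (down > 255 ? 255 : down)); // saturate to QASYMM8
 * @endcode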
 */

class CpuGemmLowpOffsetContributionOutputStageKernel : public ICpuKernel<CpuGemmLowpOffsetContributionOutputStageKernel>
{
public:
    /** Default constructor */
    CpuGemmLowpOffsetContributionOutputStageKernel() = default;
    ARM_COMPUTE_DISALLOW_COPY_ALLOW_MOVE(CpuGemmLowpOffsetContributionOutputStageKernel);
    /** Initialise the kernel inputs and output.
     *
     * @param[in]  mm_result      Input tensor info containing the result of @ref CpuGemmLowpMatrixMultiplyKernel. Data type supported: S32
     * @param[in]  vector_sum_col Input row-vector tensor info of sums of all the entries in each column of matrix B.
     *                            Can be a 1D or 2D tensor; in case of 2D, the y dimension is the batch dimension.
     *                            Note: vector_sum_col can be a nullptr in case a_offset = 0. Data type supported: same as @p mm_result
     * @param[in]  vector_sum_row Input row-vector tensor info of sums of all the entries in each row of matrix A.
     *                            Can be a 1D or 2D tensor; in case of 2D, the y dimension is the batch dimension.
     * @param[in]  bias           Biases tensor info. Only shared biases are supported and it can be a nullptr if the addition of biases is not required.
     *                            Biases are a 1D tensor with dimensions [OFM]. Data type supported: Same as @p mm_result.
     * @param[out] dst            Output tensor info containing the final quantized result. Data type supported: QASYMM8/QASYMM8_SIGNED
     * @param[in]  k              Number of matrix A columns or matrix B rows
     * @param[in]  a_offset       Offset to be added to each element of the matrix A.
     * @param[in]  b_offset       Offset to be added to each element of the matrix B.
     * @param[in]  output_stage   GEMMLowp output stage info, providing the type of quantization and the necessary parameters.
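     *
     * A possible configuration, for illustration only (the shapes, offsets, quantization and
     * output-stage values below are hypothetical placeholders, not values prescribed by this
     * interface), could look like:
     *
     * @code{.cpp}
     * // 4x8 S32 accumulators coming from a GEMM with accumulation depth 16 (illustrative values).
     * TensorInfo mm_result(TensorShape(8U, 4U), 1, DataType::S32);
     * TensorInfo vector_sum_col(TensorShape(8U), 1, DataType::S32); // sums of matrix B columns
     * TensorInfo vector_sum_row(TensorShape(4U), 1, DataType::S32); // sums of matrix A rows
     * TensorInfo dst(TensorShape(8U, 4U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10));
     *
     * GEMMLowpOutputStageInfo output_stage{};
     * output_stage.type                = GEMMLowpOutputStageType::QUANTIZE_DOWN_FIXEDPOINT;
     * output_stage.gemmlowp_multiplier = 1073741824; // illustrative fixed-point multiplier
     * output_stage.gemmlowp_shift      = 1;
     * output_stage.gemmlowp_offset     = 10;
     * output_stage.gemmlowp_min_bound  = 0;
     * output_stage.gemmlowp_max_bound  = 255;
     * output_stage.output_data_type    = DataType::QASYMM8;
     *
     * CpuGemmLowpOffsetContributionOutputStageKernel kernel;
     * // No bias in this example, hence nullptr for the bias tensor info.
     * kernel.configure(&mm_result, &vector_sum_col, &vector_sum_row, nullptr, &dst, 16, -3, -2, output_stage);
     * @endcode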
     */
    void configure(const ITensorInfo      *mm_result,
                   const ITensorInfo      *vector_sum_col,
                   const ITensorInfo      *vector_sum_row,
                   const ITensorInfo      *bias,
                   ITensorInfo            *dst,
                   int32_t                 k,
                   int32_t                 a_offset,
                   int32_t                 b_offset,
                   GEMMLowpOutputStageInfo output_stage);
    /** Static function to check if given info will lead to a valid configuration
     *
     * Similar to CpuGemmLowpOffsetContributionOutputStageKernel::configure()
     *
     * @return a status
     */
    static Status validate(const ITensorInfo      *mm_result,
                           const ITensorInfo      *vector_sum_col,
                           const ITensorInfo      *vector_sum_row,
                           const ITensorInfo      *bias,
                           const ITensorInfo      *dst,
                           int32_t                 a_offset,
                           int32_t                 b_offset,
                           GEMMLowpOutputStageInfo output_stage);

    // Inherited methods overridden:
    void        run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info) override;
    const char *name() const override;

private:
    /** Parameters captured at configure() time for the particular tensors passed, used by run_op() */
    int32_t                 _a_offset{0};
    int32_t                 _b_offset{0};
    int32_t                 _k_offset{0};
    bool                    _is_vector_sum_col_batched{true};
    GEMMLowpOutputStageInfo _output_stage{GEMMLowpOutputStageInfo()};
};
} // namespace kernels
} // namespace cpu
} // namespace arm_compute
#endif /* ARM_COMPUTE_CPU_GEMMLOWP_OFFSETCONTRIBUTION_OUTPUTSTAGE_KERNEL_H */