blob: 53a542c2dfaf76d266077772bca2c17222232939 [file] [log] [blame]
Gian Marco Iodiceab182122017-10-09 15:05:40 +01001/*
Michele Di Giorgiod9eaf612020-07-08 11:12:57 +01002 * Copyright (c) 2017-2020 Arm Limited.
Gian Marco Iodiceab182122017-10-09 15:05:40 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
Michalis Spyrouf4643372019-11-29 16:17:13 +000024#ifndef ARM_COMPUTE_NEGEMMLOWREDUCTIONKERNEL_H
25#define ARM_COMPUTE_NEGEMMLOWREDUCTIONKERNEL_H
Gian Marco Iodiceab182122017-10-09 15:05:40 +010026
27#include "arm_compute/core/NEON/INEKernel.h"
28
29namespace arm_compute
30{
Michele Di Giorgioa602f032020-03-12 19:34:33 +000031// Forward declarations
Gian Marco Iodiceab182122017-10-09 15:05:40 +010032class ITensor;
Michele Di Giorgioa602f032020-03-12 19:34:33 +000033struct GEMMLowpReductionKernelInfo;
Gian Marco Iodiceab182122017-10-09 15:05:40 +010034
/** Common interface for all NEON GEMMLowp reduction kernels.
 *
 * A reduction kernel sums the entries of each row (or column) of a quantized
 * matrix into a row-vector of S32 accumulators; the result is later used to
 * compensate for the quantization offsets in the low-precision matrix product.
 */
class INEGEMMLowpReductionKernel : public INEKernel
{
public:
    /** Constructor */
    INEGEMMLowpReductionKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers)*/
    INEGEMMLowpReductionKernel(const INEGEMMLowpReductionKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers)*/
    INEGEMMLowpReductionKernel &operator=(const INEGEMMLowpReductionKernel &) = delete;
    /** Allow instances of this class to be moved */
    INEGEMMLowpReductionKernel(INEGEMMLowpReductionKernel &&) = default;
    /** Allow instances of this class to be moved */
    INEGEMMLowpReductionKernel &operator=(INEGEMMLowpReductionKernel &&) = default;

    /** Initialise the kernel's input and output.
     *
     * @param[in]  input  Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
     * @param[out] output Output row-vector of sums of all the entries in each row/col of input tensor. Data type supported: S32
     * @param[in]  info   Kernel metadata:
     *                    - k             Number of matrix columns/rows depending on the type of reduction.
     *                    - is_reshaped   True if the matrix has been reshaped.
     *                    - scalar        Scalar value to multiply each reduced column/row by.
     *                    - mul_by_scalar True if each reduced column/row must be multiplied by a scalar value.
     */
    virtual void configure(const ITensor *input, ITensor *output, const GEMMLowpReductionKernelInfo &info) = 0;

protected:
    const ITensor *_input;         /**< Tensor being reduced (not owned) */
    ITensor       *_output;        /**< S32 row-vector of reduction results (not owned) */
    int32_t        _k;             /**< Number of entries accumulated per output element */
    bool           _is_reshaped;   /**< True if the input matrix has been reshaped */
    int32_t        _scalar;        /**< Scalar multiplier applied to each reduced value */
    bool           _mul_by_scalar; /**< True if the scalar multiplication must be applied */
};
70
/** NEON kernel used to compute the row-vectors of sums of all the entries in each row of Matrix A.
 *
 * @note This stage is needed to handle the offset of matrix product
 *       https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
 */
class NEGEMMLowpMatrixAReductionKernel : public INEGEMMLowpReductionKernel
{
public:
    const char *name() const override
    {
        return "NEGEMMLowpMatrixAReductionKernel";
    }
    /** Initialise the kernel's input and output.
     *
     * @param[in]  mtx_a          Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
     * @param[out] vector_sum_row Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
     * @param[in]  info           Kernel metadata:
     *                            - k (num_mtx_a_cols)                Number of matrix A columns
     *                            - is_reshaped (is_interleaved4x4)   True if the matrix A has been interleaved4x4
     *                            - scalar                            Scalar value to multiply each reduced row by.
     *                            - mul_by_scalar                     True if each reduced column must be multiplied by a scalar value.
     */
    void configure(const ITensor *mtx_a, ITensor *vector_sum_row, const GEMMLowpReductionKernelInfo &info) override;
    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixAReductionKernel
     *
     * @param[in] mtx_a          Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
     * @param[in] vector_sum_row Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
     * @param[in] info           Kernel metadata:
     *                           - k (num_mtx_a_cols)                Number of matrix A columns
     *                           - is_reshaped (is_interleaved4x4)   True if the matrix A has been interleaved4x4
     *                           - scalar                            Scalar value to multiply each reduced row by.
     *                           - mul_by_scalar                     True if each reduced column must be multiplied by a scalar value.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *mtx_a, const ITensorInfo *vector_sum_row, const GEMMLowpReductionKernelInfo &info);

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;

private:
    /** Execution of the reduction kernel specialized on the input type
     *
     * @param[in] window Execution window
     */
    template <typename T>
    void run_internal(const Window &window);
};
119
/** NEON kernel used to compute the row-vectors of sums of all the entries in each column of Matrix B.
 *
 * @note This stage is needed to handle the offset of matrix product
 *       https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
 */
class NEGEMMLowpMatrixBReductionKernel : public INEGEMMLowpReductionKernel
{
public:
    const char *name() const override
    {
        return "NEGEMMLowpMatrixBReductionKernel";
    }
    /** Initialise the kernel's input and output.
     *
     * @param[in]  mtx_b          Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
     * @param[out] vector_sum_col Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
     * @param[in]  info           Kernel metadata:
     *                            - k (num_mtx_b_rows)              Number of matrix B rows.
     *                            - is_reshaped (is_transposed1xW)  True if the input tensor is transposed 1xW.
     *                            - scalar                          Scalar value to multiply each reduced row by.
     *                            - mul_by_scalar                   True if each reduced row must be multiplied by a scalar value.
     */
    void configure(const ITensor *mtx_b, ITensor *vector_sum_col, const GEMMLowpReductionKernelInfo &info) override;
    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixBReductionKernel
     *
     * @param[in] mtx_b          Input tensor. Data type supported: QASYMM8/QASYMM8_SIGNED/QSYMM8/QSYMM8_PER_CHANNEL
     * @param[in] vector_sum_col Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
     * @param[in] info           Kernel metadata:
     *                           - k (num_mtx_b_rows)              Number of matrix B rows.
     *                           - is_reshaped (is_transposed1xW)  True if the input tensor is transposed 1xW.
     *                           - scalar                          Scalar value to multiply each reduced row by.
     *                           - mul_by_scalar                   True if each reduced row must be multiplied by a scalar value.
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *mtx_b, const ITensorInfo *vector_sum_col, const GEMMLowpReductionKernelInfo &info);

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;

private:
    /** Execution of the reduction kernel specialized on the input type
     *
     * @param[in] window Execution window
     * @param[in] info   Thread-related information
     */
    template <typename T>
    void run_internal(const Window &window, const ThreadInfo &info);
};
169} // namespace arm_compute
170
Michalis Spyrouf4643372019-11-29 16:17:13 +0000171#endif /* ARM_COMPUTE_NEGEMMLOWREDUCTIONKERNEL_H */