/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __ARM_COMPUTE_NEGEMMLOWREDUCTIONKERNEL_H__
#define __ARM_COMPUTE_NEGEMMLOWREDUCTIONKERNEL_H__

#include "arm_compute/core/NEON/INEKernel.h"

namespace arm_compute
{
class ITensor;

/** Common interface for all NEON reduction kernels */
class INEGEMMLowpReductionKernel : public INEKernel
{
public:
    /** Constructor */
    INEGEMMLowpReductionKernel();
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    INEGEMMLowpReductionKernel(const INEGEMMLowpReductionKernel &) = delete;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    INEGEMMLowpReductionKernel &operator=(const INEGEMMLowpReductionKernel &) = delete;
    /** Allow instances of this class to be moved */
    INEGEMMLowpReductionKernel(INEGEMMLowpReductionKernel &&) = default;
    /** Allow instances of this class to be moved */
    INEGEMMLowpReductionKernel &operator=(INEGEMMLowpReductionKernel &&) = default;

    /** Initialise the kernel's input and output.
     *
     * @param[in]  input       Input tensor. Data type supported: QASYMM8
     * @param[out] output      Output row-vector of sums of all the entries in each row/col of input tensor. Data type supported: S32
     * @param[in]  k           Number of matrix A columns (or matrix B rows)
     * @param[in]  is_reshaped True if the input tensor has been reshaped
     */
    virtual void configure(const ITensor *input, ITensor *output, int32_t k, bool is_reshaped) = 0;

protected:
    const ITensor *_input;
    ITensor       *_output;
    int32_t        _k;
    bool           _is_reshaped;
};

/** NEON kernel used to compute the row-vector of sums of all the entries in each row of Matrix A.
 *
 * @note This stage is needed to handle the offset of the matrix product
 *       https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
 */
class NEGEMMLowpMatrixAReductionKernel : public INEGEMMLowpReductionKernel
{
public:
    const char *name() const override
    {
        return "NEGEMMLowpMatrixAReductionKernel";
    }
    /** Initialise the kernel's input and output.
     *
     * @param[in]  mtx_a             Input tensor. Data type supported: QASYMM8
     * @param[out] vector_sum_row    Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
     * @param[in]  num_mtx_a_cols    Number of matrix A columns
     * @param[in]  is_interleaved4x4 True if the matrix A has been interleaved4x4
     */
    void configure(const ITensor *mtx_a, ITensor *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4) override;
    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixAReductionKernel
     *
     * @param[in] mtx_a             Input tensor. Data type supported: QASYMM8
     * @param[in] vector_sum_row    Output row-vector of sums of all the entries in each row of mtx_a. Data type supported: S32
     * @param[in] num_mtx_a_cols    Number of matrix A columns
     * @param[in] is_interleaved4x4 True if the matrix A has been interleaved4x4
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *mtx_a, const ITensorInfo *vector_sum_row, int32_t num_mtx_a_cols, bool is_interleaved4x4);

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;
};
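
/* Minimal usage sketch (illustrative only, not part of this header's API): configuring and
 * running the Matrix A reduction on a non-interleaved QASYMM8 tensor. The tensor names and
 * shapes below, and the use of NEScheduler, are assumptions made for the example.
 *
 *     // mtx_a:          QASYMM8 tensor of shape (K, M), i.e. K columns per row
 *     // vector_sum_row: S32 tensor with M entries, one sum per row of mtx_a
 *     NEGEMMLowpMatrixAReductionKernel a_reduction;
 *     a_reduction.configure(&mtx_a, &vector_sum_row, mtx_a.info()->dimension(0), false);
 *     NEScheduler::get().schedule(&a_reduction, Window::DimY);
 */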

/** NEON kernel used to compute the row-vector of sums of all the entries in each column of Matrix B.
 *
 * @note This stage is needed to handle the offset of the matrix product
 *       https://github.com/google/gemmlowp/blob/master/doc/low-precision.md
 */
class NEGEMMLowpMatrixBReductionKernel : public INEGEMMLowpReductionKernel
{
public:
    const char *name() const override
    {
        return "NEGEMMLowpMatrixBReductionKernel";
    }
    /** Initialise the kernel's input and output.
     *
     * @param[in]  mtx_b            Input tensor. Data type supported: QASYMM8
     * @param[out] vector_sum_col   Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
     * @param[in]  num_mtx_b_rows   Number of matrix B rows
     * @param[in]  is_transposed1xW True if the input tensor is transposed 1xW
     */
    void configure(const ITensor *mtx_b, ITensor *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW) override;
    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixBReductionKernel
     *
     * @param[in] mtx_b            Input tensor. Data type supported: QASYMM8
     * @param[in] vector_sum_col   Output row-vector of sums of all the entries in each column of mtx_b. Data type supported: S32
     * @param[in] num_mtx_b_rows   Number of matrix B rows
     * @param[in] is_transposed1xW True if the input tensor is transposed 1xW
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *mtx_b, const ITensorInfo *vector_sum_col, int32_t num_mtx_b_rows, bool is_transposed1xW);

    // Inherited methods overridden:
    void run(const Window &window, const ThreadInfo &info) override;
};
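
/* Minimal validation sketch (illustrative only): checking tensor info before configuring the
 * Matrix B reduction. The *_info objects and shapes are assumptions made for the example;
 * only validate() and configure() come from this header.
 *
 *     // mtx_b_info:          QASYMM8 tensor info of shape (N, K), i.e. K rows of N columns
 *     // vector_sum_col_info: S32 tensor info with N entries, one sum per column of matrix B
 *     const Status s = NEGEMMLowpMatrixBReductionKernel::validate(&mtx_b_info, &vector_sum_col_info,
 *                                                                 mtx_b_info.dimension(1), false);
 *     if(s.error_code() == ErrorCode::OK)
 *     {
 *         // safe to call configure() on matching ITensor objects
 *     }
 */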
} // namespace arm_compute

#endif /* __ARM_COMPUTE_NEGEMMLOWREDUCTIONKERNEL_H__ */