/*
 * Copyright (c) 2017-2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H
#define ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H

#include "NEActivationLayer.h"
#include "arm_compute/core/NEON/INEKernel.h"
#include "arm_compute/core/NEON/kernels/NEConvertQuantizedSignednessKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpOffsetContributionOutputStageKernel.h"
#include "arm_compute/core/NEON/kernels/NEGEMMLowpReductionKernel.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include "arm_compute/runtime/MemoryGroup.h"
#include "arm_compute/runtime/NEON/functions/NEGEMMAssemblyDispatch.h"
#include "arm_compute/runtime/Tensor.h"

#include <memory>

namespace arm_compute
{
class ITensor;

/** Basic function to execute GEMMLowpMatrixMultiplyCore on NEON. This function calls the following NEON kernels if the DOT product instruction is not available:
 *
 * -# @ref NEGEMMInterleave4x4Kernel
 * -# @ref NEGEMMTranspose1xWKernel
 * -# @ref NEGEMMLowpMatrixMultiplyKernel
 * -# @ref NEGEMMLowpOffsetContributionKernel
 * -# @ref NEActivationLayer
 *
 * otherwise, if the DOT product instruction is available:
 *
 * -# @ref NEGEMMLowpOffsetContributionKernel
 */
class NEGEMMLowpMatrixMultiplyCore : public IFunction
{
public:
    /** Constructor */
    NEGEMMLowpMatrixMultiplyCore(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGEMMLowpMatrixMultiplyCore(const NEGEMMLowpMatrixMultiplyCore &) = delete;
    /** Default move constructor */
    NEGEMMLowpMatrixMultiplyCore(NEGEMMLowpMatrixMultiplyCore &&) = default;
    /** Prevent instances of this class from being copied (As this class contains pointers) */
    NEGEMMLowpMatrixMultiplyCore &operator=(const NEGEMMLowpMatrixMultiplyCore &) = delete;
    /** Default move assignment operator */
    NEGEMMLowpMatrixMultiplyCore &operator=(NEGEMMLowpMatrixMultiplyCore &&) = default;
    /** Initialise the kernel's inputs, output
     *
     * @note GEMM_LOWP: low precision GEMM kernel
     * This kernel performs the following computations:
     *
     * -# Convert a values from QASYMM8 to int32 and add a_offset to each of them.
     * -# Convert b values from QASYMM8 to int32 and add b_offset to each of them.
     * -# Compute the matrix product of the resulting a * b in int32.
     *
     * @note The @p output type is S32 if @p gemm_info.type == GEMMLowpOutputStageType::NONE. It is QASYMM8/QASYMM8_SIGNED otherwise
     *
     * @param[in]  a         First input tensor  (Matrix A). Data type supported: QASYMM8/QASYMM8_SIGNED.
     * @param[in]  b         Second input tensor (Matrix B). Data type supported: same as @p a
     * @param[in]  c         Third input tensor  (Matrix C). It can be a nullptr. Data type supported: S32
     * @param[out] output    Output tensor. Data type supported: S32/QASYMM8/QASYMM8_SIGNED
     * @param[in]  gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
     *                       if the reshape of matrix B should be executed only for the first run
     */
    void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *output, const GEMMInfo &gemm_info = GEMMInfo());
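    /* Illustrative element-wise view of the computation listed in the note above.
     * This is a sketch of the documented steps, not the kernel implementation; the
     * offsets are assumed to come from the quantization info of a and b:
     *
     *   dst[m][n] = sum_k (int32(a[m][k]) + a_offset) * (int32(b[k][n]) + b_offset)
     *
     * The S32 result is kept as-is unless an output stage is fused through gemm_info.
     */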
    /** Static function to check if given info will lead to a valid configuration of @ref NEGEMMLowpMatrixMultiplyCore
     *
     * @note The @p output type is S32 if @p gemm_info.type == GEMMLowpOutputStageType::NONE. It is QASYMM8/QASYMM8_SIGNED otherwise
     *
     * @param[in] a         First input tensor info  (Matrix A). Data type supported: QASYMM8/QASYMM8_SIGNED.
     * @param[in] b         Second input tensor info (Matrix B). Data type supported: same as @p a
     * @param[in] c         Third input tensor info  (Matrix C). It can be a nullptr. Data type supported: S32
     * @param[in] output    Output tensor info. Data type supported: S32/QASYMM8/QASYMM8_SIGNED
     * @param[in] gemm_info (Optional) Specifies if the matrix A and/or matrix B have been reshaped and
     *                      if the reshape of matrix B should be executed only for the first run
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, const GEMMInfo &gemm_info = GEMMInfo());
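    /* A minimal pre-flight check sketch, assuming Tensor objects a, b and output have
     * already been initialised with the intended shapes and data types:
     *
     * @code
     * const Status s = NEGEMMLowpMatrixMultiplyCore::validate(a.info(), b.info(), nullptr, output.info());
     * // s.error_description() carries the reason if the proposed configuration is not supported
     * @endcode
     */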

    // Inherited methods overridden
    void run() override;
    void prepare() override;

private:
    MemoryGroup                                   _memory_group;
    NEGEMMAssemblyDispatch                        _asm_glue;
    std::unique_ptr<INEKernel>                    _mm_kernel;
    std::unique_ptr<INEKernel>                    _mtx_a_reshape_kernel;
    std::unique_ptr<INEKernel>                    _mtx_b_reshape_kernel;
    NEGEMMLowpMatrixAReductionKernel              _mtx_a_reduction_kernel;
    NEGEMMLowpMatrixBReductionKernel              _mtx_b_reduction_kernel;
    NEGEMMLowpOffsetContributionKernel            _offset_contribution_kernel;
    NEGEMMLowpOffsetContributionOutputStageKernel _offset_contribution_output_stage_kernel;
    NEActivationLayer                             _activation_func;
    NEConvertQuantizedSignednessKernel            _convert_to_signed_asymm;
    NEConvertQuantizedSignednessKernel            _convert_from_signed_asymm;

    Tensor         _vector_sum_col;
    Tensor         _vector_sum_row;
    Tensor         _tmp_a;
    Tensor         _tmp_b;
    Tensor         _mm_result_s32;
    Tensor         _signed_a;
    Tensor         _signed_output;
    const ITensor *_original_b;
    int32_t        _a_offset;
    int32_t        _b_offset;

    bool _run_vector_matrix_multiplication;
    bool _assembly_path;
    bool _fused_assembly_path;
    bool _reshape_b_only_on_first_run;
    bool _is_prepared;
    bool _fuse_output_stage;
    bool _run_activation;
    bool _flip_signedness;
};
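
/* A minimal end-to-end usage sketch. The shapes, quantization parameters and the S32
 * output type below are illustrative assumptions, not requirements of this header:
 *
 * @code
 * Tensor a{}, b{}, dst{};
 * // M = 16, N = 8, K = 32: a is MxK, b is KxN, dst is MxN (shapes given as (width, height))
 * a.allocator()->init(TensorInfo(TensorShape(32U, 16U), 1, DataType::QASYMM8, QuantizationInfo(0.5f, 10)));
 * b.allocator()->init(TensorInfo(TensorShape(8U, 32U), 1, DataType::QASYMM8, QuantizationInfo(0.25f, 3)));
 * dst.allocator()->init(TensorInfo(TensorShape(8U, 16U), 1, DataType::S32));
 *
 * NEGEMMLowpMatrixMultiplyCore gemmlowp;
 * gemmlowp.configure(&a, &b, nullptr, &dst);
 *
 * a.allocator()->allocate();
 * b.allocator()->allocate();
 * dst.allocator()->allocate();
 * // ... fill a and b with quantized input data ...
 * gemmlowp.run();
 * @endcode
 */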
} // namespace arm_compute
#endif /* ARM_COMPUTE_NEGEMMLOWPMATRIXMULTIPLYCORE_H */