/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_CLGEMMMATRIXMULTIPLYRESHAPEDKERNEL_H
#define ARM_COMPUTE_CLGEMMMATRIXMULTIPLYRESHAPEDKERNEL_H

#include "arm_compute/core/CL/ICLKernel.h"

#include "arm_compute/core/KernelDescriptors.h"

namespace arm_compute
{
class ICLTensor;

/** OpenCL kernel to multiply matrices when both the input matrices LHS (input0) and RHS (input1) have been reshaped
 *
 * @note The input matrices @p input0 and @p input1 must be reshaped through @ref CLGEMMReshapeLHSMatrixKernel and @ref CLGEMMReshapeRHSMatrixKernel
 */
class CLGEMMMatrixMultiplyReshapedKernel : public ICLKernel
{
public:
    /** Default constructor */
    CLGEMMMatrixMultiplyReshapedKernel();
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    CLGEMMMatrixMultiplyReshapedKernel(const CLGEMMMatrixMultiplyReshapedKernel &) = delete;
    /** Prevent instances of this class from being copied (as this class contains pointers) */
    CLGEMMMatrixMultiplyReshapedKernel &operator=(const CLGEMMMatrixMultiplyReshapedKernel &) = delete;
    /** Allow instances of this class to be moved */
    CLGEMMMatrixMultiplyReshapedKernel(CLGEMMMatrixMultiplyReshapedKernel &&) = default;
    /** Allow instances of this class to be moved */
    CLGEMMMatrixMultiplyReshapedKernel &operator=(CLGEMMMatrixMultiplyReshapedKernel &&) = default;
    /** Initialise the kernel's inputs and output.
     *
     * @note The F16 computation also supports mixed precision through the gemm_info.fp_mixed_precision flag.
     *       Mixed precision combines different floating point precisions during the computation, in particular F32 for the accumulations and F16 for the
     *       multiplications, i.e. float c = (half)a * (half)b
     *
     * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will fetch the RHS data using the OpenCL read_image built-in function.
     *       Reading from the OpenCL image object can increase performance. However, since the OpenCL image object is created by importing the OpenCL buffer,
     *       the following conditions are required:
     *       -# rhs_info.n0 can only be 4, 8 and 16
     *       -# rhs_info.k0 can only be 4, 8 and 16
     *       -# Data type can only be F32
     *       -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
     *       -# The stride Y for input1 should satisfy the OpenCL pitch alignment requirement
     *       -# input1 width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
     *       -# input1 (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
     *
     * @param[in]  input0    Input tensor containing the LHS reshaped matrix. Data type supported: F16/F32 (only F32 if rhs_info.export_to_cl_image = true). The number of dimensions for the LHS matrix must be less than or equal to 4
     * @param[in]  input1    Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3
     * @param[in]  input2    Input tensor containing the bias matrix. Data type supported: same as @p input0.
     * @param[out] output    Output tensor to store the result of the matrix multiplication. Data type supported: same as @p input0
     * @param[in]  alpha     Weight of the matrix product
     * @param[in]  beta      Weight of the matrix bias
     * @param[in]  lhs_info  LHS matrix information used for reshaping the input0 tensor. Only the following values are supported:
     *                       lhs_info.m0: 2,3,4,5,6,7,8
     *                       lhs_info.k0: 2,3,4,8,16
     *                       lhs_info.transpose: false
     * @param[in]  rhs_info  RHS matrix information used for reshaping the input1 tensor. Only the following values are supported:
     *                       rhs_info.n0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
     *                       rhs_info.k0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
     *                       rhs_info.transpose: true
     * @param[in]  gemm_info GEMM information used to retrieve the original dimensions of the input matrices
     *
     * @note lhs_info.k0 must be equal to rhs_info.k0
     */
    void configure(const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
                   const GEMMRHSMatrixInfo &rhs_info,
                   const GEMMKernelInfo &gemm_info);
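    // A rough usage sketch of this kernel, kept as a comment. The tensor names, the chosen block sizes and the
    // GEMMKernelInfo fields populated below are illustrative assumptions; see CLGEMMReshapeLHSMatrixKernel,
    // CLGEMMReshapeRHSMatrixKernel and arm_compute/core/KernelDescriptors.h for the authoritative interfaces.
    //
    //     GEMMLHSMatrixInfo lhs_info{};
    //     lhs_info.m0        = 4;      // rows processed per work-item (2..8)
    //     lhs_info.k0        = 4;      // accumulation block size, must match rhs_info.k0
    //     lhs_info.v0        = 2;      // vertical blocks interleaved in the reshaped LHS
    //     lhs_info.transpose = false;  // this kernel expects a non-transposed LHS block
    //
    //     GEMMRHSMatrixInfo rhs_info{};
    //     rhs_info.n0        = 4;      // columns processed per work-item (only 4/8/16 with export_to_cl_image)
    //     rhs_info.k0        = 4;
    //     rhs_info.h0        = 4;      // horizontal blocks interleaved in the reshaped RHS
    //     rhs_info.transpose = true;   // this kernel expects a transposed RHS block
    //
    //     GEMMKernelInfo gemm_info{};
    //     gemm_info.m = M; gemm_info.n = N; gemm_info.k = K; // original (un-reshaped) GEMM dimensions
    //
    //     CLGEMMReshapeLHSMatrixKernel       reshape_lhs;
    //     CLGEMMReshapeRHSMatrixKernel       reshape_rhs;
    //     CLGEMMMatrixMultiplyReshapedKernel mm;
    //     reshape_lhs.configure(&a, &a_reshaped, lhs_info);
    //     reshape_rhs.configure(&b, &b_reshaped, rhs_info);
    //     mm.configure(&a_reshaped, &b_reshaped, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, gemm_info);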
    /** Initialise the kernel's inputs and output.
     *
     * @note The F16 computation also supports mixed precision through the gemm_info.fp_mixed_precision flag.
     *       Mixed precision combines different floating point precisions during the computation, in particular F32 for the accumulations and F16 for the
     *       multiplications, i.e. float c = (half)a * (half)b
     *
     * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will fetch the RHS data using the OpenCL read_image built-in function.
     *       Reading from the OpenCL image object can increase performance. However, since the OpenCL image object is created by importing the OpenCL buffer,
     *       the following conditions are required:
     *       -# rhs_info.n0 can only be 4, 8 and 16
     *       -# rhs_info.k0 can only be 4, 8 and 16
     *       -# Data type can only be F32
     *       -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
     *       -# The stride Y for input1 should satisfy the OpenCL pitch alignment requirement
     *       -# input1 width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
     *       -# input1 (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
     *
     * @param[in]  compile_context The compile context to be used.
     * @param[in]  input0          Input tensor containing the LHS reshaped matrix. Data type supported: F16/F32 (only F32 if rhs_info.export_to_cl_image = true). The number of dimensions for the LHS matrix must be less than or equal to 4
     * @param[in]  input1          Input tensor containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3
     * @param[in]  input2          Input tensor containing the bias matrix. Data type supported: same as @p input0.
     * @param[out] output          Output tensor to store the result of the matrix multiplication. Data type supported: same as @p input0
     * @param[in]  alpha           Weight of the matrix product
     * @param[in]  beta            Weight of the matrix bias
     * @param[in]  lhs_info        LHS matrix information used for reshaping the input0 tensor. Only the following values are supported:
     *                             lhs_info.m0: 2,3,4,5,6,7,8
     *                             lhs_info.k0: 2,3,4,8,16
     *                             lhs_info.transpose: false
     * @param[in]  rhs_info        RHS matrix information used for reshaping the input1 tensor. Only the following values are supported:
     *                             rhs_info.n0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
     *                             rhs_info.k0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
     *                             rhs_info.transpose: true
     * @param[in]  gemm_info       GEMM information used to retrieve the original dimensions of the input matrices
     *
     * @note lhs_info.k0 must be equal to rhs_info.k0
     */
    void configure(const CLCompileContext &compile_context, const ICLTensor *input0, const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float alpha, float beta,
                   const GEMMLHSMatrixInfo &lhs_info,
                   const GEMMRHSMatrixInfo &rhs_info,
                   const GEMMKernelInfo &gemm_info);
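    // The overload above differs from the first one only in taking an explicit CLCompileContext, which is useful
    // when the caller manages OpenCL program compilation itself (e.g. to share a compiled-kernel cache). A minimal
    // sketch, assuming the default compile context held by CLKernelLibrary is reused:
    //
    //     mm.configure(CLKernelLibrary::get().get_compile_context(),
    //                  &a_reshaped, &b_reshaped, &bias, &dst, 1.0f, 1.0f, lhs_info, rhs_info, gemm_info);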
    /** Static function to check if given info will lead to a valid configuration of @ref CLGEMMMatrixMultiplyReshapedKernel
     *
     * @note The F16 computation also supports mixed precision through the gemm_info.fp_mixed_precision flag.
     *       Mixed precision combines different floating point precisions during the computation, in particular F32 for the accumulations and F16 for the
     *       multiplications, i.e. float c = (half)a * (half)b
     *
     * @note If rhs_info.export_to_cl_image = true, this OpenCL kernel will fetch the RHS data using the OpenCL read_image built-in function.
     *       Reading from the OpenCL image object can increase performance. However, since the OpenCL image object is created by importing the OpenCL buffer,
     *       the following conditions are required:
     *       -# rhs_info.n0 can only be 4, 8 and 16
     *       -# rhs_info.k0 can only be 4, 8 and 16
     *       -# Data type can only be F32
     *       -# The platform should support the OpenCL cl_khr_image2d_from_buffer extension
     *       -# The stride Y for input1 should satisfy the OpenCL pitch alignment requirement
     *       -# input1 width should be less than or equal to (CL_DEVICE_IMAGE2D_MAX_WIDTH * 4)
     *       -# input1 (height * depth) should be less than or equal to CL_DEVICE_IMAGE2D_MAX_HEIGHT
     *
     * @param[in] input0    Input tensor info containing the LHS reshaped matrix. Data type supported: F16/F32 (only F32 if rhs_info.export_to_cl_image = true). The number of dimensions for the LHS matrix must be less than or equal to 4
     * @param[in] input1    Input tensor info containing the RHS reshaped matrix. Data type supported: same as @p input0. The number of dimensions for the RHS matrix must be less than or equal to 3
     * @param[in] input2    Input tensor info containing the bias matrix. Data type supported: same as @p input0.
     * @param[in] output    Output tensor info to store the result of the matrix multiplication. Data type supported: same as @p input0
     * @param[in] alpha     Weight of the matrix product
     * @param[in] beta      Weight of the matrix bias
     * @param[in] lhs_info  LHS matrix information used for reshaping the input0 tensor. Only the following values are supported:
     *                      lhs_info.m0: 2,3,4,5,6,7,8
     *                      lhs_info.k0: 2,3,4,8,16
     *                      lhs_info.transpose: false
     * @param[in] rhs_info  RHS matrix information used for reshaping the input1 tensor. Only the following values are supported:
     *                      rhs_info.n0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
     *                      rhs_info.k0: 2,3,4,8,16 (only 4, 8 and 16 if rhs_info.export_to_cl_image = true)
     *                      rhs_info.transpose: true
     * @param[in] gemm_info GEMM information used to retrieve the original dimensions of the input matrices
     *
     * @note lhs_info.k0 must be equal to rhs_info.k0
     *
     * @return a status
     */
    static Status validate(const ITensorInfo *input0, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float alpha, float beta, const GEMMLHSMatrixInfo &lhs_info,
                           const GEMMRHSMatrixInfo &rhs_info,
                           const GEMMKernelInfo &gemm_info);
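    // A minimal sketch of calling validate() before configure(): it operates on ITensorInfo objects only, so an
    // unsupported configuration can be rejected without allocating any OpenCL resources. The variable names below
    // are illustrative assumptions carried over from the sketch above.
    //
    //     const Status s = CLGEMMMatrixMultiplyReshapedKernel::validate(a_reshaped.info(), b_reshaped.info(), bias.info(), dst.info(),
    //                                                                   1.0f, 1.0f, lhs_info, rhs_info, gemm_info);
    //     if(s.error_code() != ErrorCode::OK)
    //     {
    //         // Fall back to another GEMM kernel or adjust lhs_info/rhs_info (e.g. disable export_to_cl_image)
    //     }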

    // Inherited methods overridden:
    void run(const Window &window, cl::CommandQueue &queue) override;

private:
    const ICLTensor *_input0;
    const ICLTensor *_input1;
    const ICLTensor *_input2;
    ICLTensor       *_output;
    bool             _slide_matrix_b;
    bool             _reinterpret_output_as_3d;
    bool             _use_dummy_work_items;
    bool             _add_bias;
    bool             _broadcast_bias;
    bool             _export_to_cl_image;
    unsigned int     _k;
};
} // namespace arm_compute
#endif /*ARM_COMPUTE_CLGEMMMATRIXMULTIPLYRESHAPEDKERNEL_H*/