/*
 * Copyright (c) 2017-2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;
#include "helpers_cs.h"

#if defined(DATA_TYPE_FP16)
precision mediump float;
#endif // DATA_TYPE_FP16

#if defined(DATA_TYPE_FP32)
#ifdef GEMM_TRANSPOSE1xW
/** This OpenGL ES kernel computes the "vector" 1x4 transposition of the input matrix
 *
 * @param[in]  src_ptr   Pointer to the source matrix. Supported data types: F32
 * @param[in]  src_attrs The attributes of the source matrix
 * @param[out] dst_ptr   Pointer to the destination matrix. Supported data types: same as @p src_ptr
 * @param[in]  dst_attrs The attributes of the destination matrix
 */
SHADER_PARAMS_DECLARATION
{
    ImageAttributes src_attrs;
    ImageAttributes dst_attrs;
};
TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);

void main(void)
{
    /* Compute address for Matrix B - source */
    ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
    ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);

    /* Compute address for Matrix B transposed - destination. X and Y are swapped */
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, gl_GlobalInvocationID.y * uint(16) + gl_GlobalInvocationID.x * dst_attrs.stride_y);
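    /* Note: one 1x4 block of floats is 4 * 4 = 16 bytes, hence the fixed 16-byte step along the X axis of the transposed output. */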

    vec4 b0 = VLOAD4_CURRENT_ITEM(vec4, src_ptr, src_iter);
    VSTORE4_CURRENT_ITEM(dst_ptr, dst_iter, b0);
}
#endif /* GEMM_TRANSPOSE1xW */

#ifdef GEMM_INTERLEAVE4x4
/** This OpenGL ES kernel reshapes the input matrix by interleaving its values
 *
 * @param[in]  src_ptr   Pointer to the source matrix. Supported data types: F32
 * @param[in]  src_attrs The attributes of the source matrix
 * @param[out] dst_ptr   Pointer to the destination matrix. Supported data types: same as @p src_ptr
 * @param[in]  dst_attrs The attributes of the destination matrix
 */
SHADER_PARAMS_DECLARATION
{
    ImageAttributes src_attrs;
    ImageAttributes dst_attrs;
};
TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);

void main(void)
{
    /* Compute source and destination addresses */
    ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
    ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);

    int i;
    int j;

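    /* Note: the four values at x-offset i (rows j = 0..3) of the source block are written to consecutive destination elements i * 4 .. i * 4 + 3. */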
    for(i = 0; i < 4; ++i)
    {
        for(j = 0; j < 4; ++j)
        {
            float res = LOAD(src_ptr, IMAGE_OFFSET(src_iter, i, j));
            STORE(dst_ptr, TENSOR_OFFSET_ADVANCE(dst_iter, (i * 4 + j)), res);
        }
    }
}
#endif /* GEMM_INTERLEAVE4x4 */

#ifdef GEMM_ACCUMULATE_BIASES
/** This kernel accumulates the biases vector into each row
 *
 * @param[in, out] accum_ptr    Pointer to the accumulate tensor. Supported data type: F32
 * @param[in]      accum_attrs  The attributes of the accumulate tensor
 * @param[in]      biases_ptr   Pointer to the biases vector. Supported data type: same as @p accum_ptr
 * @param[in]      biases_attrs The attributes of the biases tensor
 */
SHADER_PARAMS_DECLARATION
{
    ImageAttributes  accum_attrs;
    VectorAttributes biases_attrs;
};
TENSOR_DECLARATION(1, accumBuffer, float, accum_ptr, accum_shift, 2, restrict);
TENSOR_DECLARATION(2, biasesBuffer, float, biases_ptr, biases_shift, 2, readonly);

void main(void)
{
    ImageIterator  accum_iter  = CONVERT_TO_IMAGE_ITERATOR(accum_attrs, accum_shift);
    VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR(biases_attrs, biases_shift);

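    /* Note: each invocation adds 16 consecutive bias values to 16 consecutive elements of its row of the accumulator. */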
    for(int i = 0; i < 16; ++i)
    {
        float accum_value  = LOAD(accum_ptr, TENSOR_OFFSET_ADVANCE(accum_iter, i));
        float biases_value = LOAD(biases_ptr, TENSOR_OFFSET_ADVANCE(biases_iter, i));
        accum_value        = biases_value + accum_value;

        // Store result in the accumulate buffer
        STORE(accum_ptr, TENSOR_OFFSET_ADVANCE(accum_iter, i), accum_value);
    }
}
#endif /* GEMM_ACCUMULATE_BIASES */

#ifdef GEMM_MM_INTERLEAVED_TRANSPOSED /* not validated */
/** This OpenGL ES kernel is optimised for Midgard. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
 * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication
 *
 * @attention The width of matrix B and the value of alpha need to be passed at compile time using WIDTH_MATRIX_B and ALPHA
 *
 * @param[in]  src0_ptr   Pointer to the source matrix. Supported data types: F32
 * @param[in]  src0_attrs The attributes of the source matrix
 * @param[in]  src1_ptr   Pointer to the source matrix. Supported data types: same as @p src0_ptr
 * @param[in]  src1_attrs The attributes of the source matrix
 * @param[out] dst_ptr    Pointer to the destination matrix. Supported data types: same as @p src0_ptr
 * @param[in]  dst_attrs  The attributes of the destination matrix
 */
SHADER_PARAMS_DECLARATION
{
    ImageAttributes src0_attrs;
    ImageAttributes src1_attrs;
    ImageAttributes dst_attrs;
};
TENSOR_DECLARATION(1, src0Buffer, float, src0_ptr, src0_shift, 2, readonly);
TENSOR_DECLARATION(2, src1Buffer, float, src1_ptr, src1_shift, 2, readonly);
TENSOR_DECLARATION(3, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);

void main()
{
    ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
    ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
    ImageIterator dst_iter  = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);

    /* Compute address for matrix A and B */
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * (src0_attrs.stride_y));
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(gl_GlobalInvocationID.x) * (src1_attrs.stride_y));
    /* Compute end row address for matrix B */
    int end_row_mtx_b = int(TENSOR_OFFSET_ADVANCE(src1_iter, COLS_B));
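    /* Note: the loops below run until src1 has advanced COLS_B elements past its current position. */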

    /* Reset accumulators */
    vec4 c00 = vec4(0.0f);
    vec4 c10 = vec4(0.0f);
    vec4 c20 = vec4(0.0f);
    vec4 c30 = vec4(0.0f);

    // FIXME: loop unrolling really needed for GLES?
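    /* The first loop is unrolled by two: each iteration consumes two 1x4 blocks (8 elements) from both reshaped matrices; the second loop handles the remainder. */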
    for(; int(CURRENT_ITEM_OFFSET(src1_iter)) <= (end_row_mtx_b - 8); TENSOR_ITERATOR_ADVANCE(src0_iter, 8), TENSOR_ITERATOR_ADVANCE(src1_iter, 8))
    {
        /* Load values from matrix A (interleaved) and matrix B (transposed) */
        vec4 a0 = VLOAD4_CURRENT_ITEM(vec4, src0_ptr, src0_iter);
        vec4 b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);

        c00 += vec4(a0.x) * b0;
        c10 += vec4(a0.y) * b0;
        c20 += vec4(a0.z) * b0;
        c30 += vec4(a0.w) * b0;

        /* Load values from matrix A (interleaved) and matrix B (transposed) */
        a0 = VLOAD4(vec4, src0_ptr, TENSOR_OFFSET_ADVANCE(src0_iter, 4));
        b0 = VLOAD4(vec4, src1_ptr, TENSOR_OFFSET_ADVANCE(src1_iter, 4));

        c00 += vec4(a0.x) * b0;
        c10 += vec4(a0.y) * b0;
        c20 += vec4(a0.z) * b0;
        c30 += vec4(a0.w) * b0;
    }

    for(; int(CURRENT_ITEM_OFFSET(src1_iter)) < end_row_mtx_b; TENSOR_ITERATOR_ADVANCE(src0_iter, 4), TENSOR_ITERATOR_ADVANCE(src1_iter, 4))
    {
        /* Load values from matrix A (interleaved) and matrix B (transposed) */
        vec4 a0 = VLOAD4_CURRENT_ITEM(vec4, src0_ptr, src0_iter);
        vec4 b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);

        c00 += vec4(a0.x) * b0;
        c10 += vec4(a0.y) * b0;
        c20 += vec4(a0.z) * b0;
        c30 += vec4(a0.w) * b0;
    }

    /* Multiply by the weight of matrix product */
    c00 = c00 * vec4(ALPHA);
    c10 = c10 * vec4(ALPHA);
    c20 = c20 * vec4(ALPHA);
    c30 = c30 * vec4(ALPHA);

    /* Store 4x4 block */
    VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 0), c00);
    VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), c10);
    VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), c20);
    VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), c30);
}
#endif /* GEMM_MM_INTERLEAVED_TRANSPOSED */

#ifdef GEMM_MM_FLOATING_POINT
/** This OpenGL ES kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
 * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_32bit and @ref gemm_transpose1x4 before running the matrix multiplication
 *
 * @attention The width of matrix B and the value of alpha need to be passed at compile time using WIDTH_MATRIX_B and ALPHA
 *
 * @param[in]  src0_ptr   Pointer to the source matrix. Supported data types: F32
 * @param[in]  src0_attrs The attributes of the source matrix
 * @param[in]  src1_ptr   Pointer to the source matrix. Supported data types: same as @p src0_ptr
 * @param[in]  src1_attrs The attributes of the source matrix
 * @param[out] dst_ptr    Pointer to the destination matrix. Supported data types: same as @p src0_ptr
 * @param[in]  dst_attrs  The attributes of the destination matrix
 */
SHADER_PARAMS_DECLARATION
{
    ImageAttributes src0_attrs;
    ImageAttributes src1_attrs;
    ImageAttributes dst_attrs;
};
TENSOR_DECLARATION(1, src0Buffer, float, src0_ptr, src0_shift, 2, readonly);
TENSOR_DECLARATION(2, src1Buffer, float, src1_ptr, src1_shift, 2, readonly);
TENSOR_DECLARATION(3, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);

void main()
{
    ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
    ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
    ImageIterator dst_iter  = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);

    int idx = int(gl_GlobalInvocationID.x) * int(NUM_ELEMS_PROCESSED_PER_THREAD_X);
    /* Compute the address for the vector A and matrix B */
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * (src0_attrs.stride_y) * uint(NUM_ELEMS_PROCESSED_PER_THREAD_Y));
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, idx * 4);
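    /* Note: idx counts float elements along the row of matrix B; the factor of 4 converts it to a byte offset. */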

    /* Compute end row address for matrix A */
    int end_row_vec_a = int(TENSOR_OFFSET_ADVANCE_IN_BYTES(src0_iter, COLS_A * 4));

    /* Reset accumulators */
    vec4 acc0 = vec4(0.0f);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
    vec4 acc1 = vec4(0.0f);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
    vec4 acc2 = vec4(0.0f);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
    vec4 acc3 = vec4(0.0f);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3

    for(; int(CURRENT_ITEM_OFFSET(src0_iter)) <= (end_row_vec_a - 2); TENSOR_ITERATOR_ADVANCE(src0_iter, 2), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(2) * src1_attrs.stride_y))
    {
        vec2 a0 = VLOAD2_CURRENT_ITEM(vec2, src0_ptr, src0_iter);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
        vec2 a1 = VLOAD2(vec2, src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
        vec2 a2 = VLOAD2(vec2, src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
        vec2 a3 = VLOAD2(vec2, src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3

        vec4 b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);
        vec4 b1 = VLOAD4(vec4, src1_ptr, IMAGE_OFFSET(src1_iter, 0, 1));

        acc0 += b0 * vec4(a0.x);
        acc0 += b1 * vec4(a0.y);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
        acc1 += b0 * vec4(a1.x);
        acc1 += b1 * vec4(a1.y);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
        acc2 += b0 * vec4(a2.x);
        acc2 += b1 * vec4(a2.y);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
        acc3 += b0 * vec4(a3.x);
        acc3 += b1 * vec4(a3.y);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
    }

    for(; int(CURRENT_ITEM_OFFSET(src0_iter)) < end_row_vec_a; TENSOR_ITERATOR_ADVANCE(src0_iter, 1), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, src1_attrs.stride_y))
    {
        // Load values from matrix A
        float a0 = LOAD_CURRENT_ITEM(src0_ptr, src0_iter);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
        float a1 = LOAD(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
        float a2 = LOAD(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
        float a3 = LOAD(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3

        vec4 b0 = VLOAD4_CURRENT_ITEM(vec4, src1_ptr, src1_iter);

        acc0 += b0 * vec4(a0);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
        acc1 += b0 * vec4(a1);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
        acc2 += b0 * vec4(a2);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
        acc3 += b0 * vec4(a3);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
    }

    /* Multiply by the weight of vector-matrix product */
    acc0 = acc0 * vec4(ALPHA);
    VSTORE4_CURRENT_ITEM(dst_ptr, dst_iter, acc0);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
    acc1 = acc1 * vec4(ALPHA);
    VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), acc1);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
    acc2 = acc2 * vec4(ALPHA);
    VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), acc2);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
    acc3 = acc3 * vec4(ALPHA);
    VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), acc3);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
#endif /* GEMM_MM_FLOATING_POINT */

#ifdef GEMM_MATRIXADDITION
/** This OpenGL ES kernel performs the in-place matrix addition between two matrices, taking into account that the second matrix might be weighted by a scalar value beta:
 *
 * @attention The value of beta needs to be passed at compile time using BETA
 *
 * @param[in]  src_ptr   Pointer to the source matrix. Supported data types: F32
 * @param[in]  src_attrs The attributes of the source matrix
 * @param[out] dst_ptr   Pointer to the destination matrix. Supported data types: same as @p src_ptr
 * @param[in]  dst_attrs The attributes of the destination matrix
 */
SHADER_PARAMS_DECLARATION
{
    ImageAttributes src_attrs;
    ImageAttributes dst_attrs;
};
TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, restrict);

void main(void)
{
    /* Compute source and destination addresses */
    ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
    ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);

    /* Load values from A x B */
    vec4 alpha_ab = VLOAD4_CURRENT_ITEM(vec4, dst_ptr, dst_iter);
    vec4 c        = VLOAD4_CURRENT_ITEM(vec4, src_ptr, src_iter);

    /* Compute alpha * axb + beta * c */
    vec4 out1 = alpha_ab + vec4(float(BETA) * c);

    /* Store final result in the axb matrix */
    VSTORE4_CURRENT_ITEM(dst_ptr, dst_iter, out1);
}
#endif /* GEMM_MATRIXADDITION */

#elif defined(DATA_TYPE_FP16)

#ifdef GEMM_TRANSPOSE1xW
/** This OpenGL ES kernel computes the "vector" 1x8 transposition of the input matrix
 *
 * @param[in]  src_ptr   Pointer to the source matrix. Supported data types: F16
 * @param[in]  src_attrs The attributes of the source matrix
 * @param[out] dst_ptr   Pointer to the destination matrix. Supported data types: same as @p src_ptr
 * @param[in]  dst_attrs The attributes of the destination matrix
 */
SHADER_PARAMS_DECLARATION
{
    ImageAttributes src_attrs;
    ImageAttributes dst_attrs;
};
TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);

void main(void)
{
    /* Compute address for Matrix B - source */
    ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
    ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);

    /* Compute address for Matrix B transposed - destination. X and Y are swapped */
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, gl_GlobalInvocationID.y * uint(16) + gl_GlobalInvocationID.x * dst_attrs.stride_y);
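    /* Note: one uvec4 element packs eight fp16 values (8 * 2 = 16 bytes), so each invocation copies a whole 1x8 block with a fixed 16-byte step along the X axis of the transposed output. */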

    STORE_CURRENT_ITEM(dst_ptr, dst_iter, LOAD_CURRENT_ITEM(src_ptr, src_iter));
}
#endif /* GEMM_TRANSPOSE1xW */

#ifdef GEMM_INTERLEAVE4x4
/** This OpenGL ES kernel reshapes the input matrix by interleaving its values
 *
 * @param[in]  src_ptr   Pointer to the source matrix. Supported data types: F16
 * @param[in]  src_attrs The attributes of the source matrix
 * @param[out] dst_ptr   Pointer to the destination matrix. Supported data types: same as @p src_ptr
 * @param[in]  dst_attrs The attributes of the destination matrix
 */
SHADER_PARAMS_DECLARATION
{
    ImageAttributes src_attrs;
    ImageAttributes dst_attrs;
};
TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);

void main(void)
{
    /* Compute source and destination addresses */
    ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
    ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);

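    /* Note: each uvec4 load unpacks into two vec4s (eight fp16 values), so s0..s3 below hold a full 4x8 source block that is written out interleaved. */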
    vec4 s0[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src_ptr, src_iter);
    vec4 s1[2] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 1));
    vec4 s2[2] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 2));
    vec4 s3[2] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 3));

    vec4 s[2];
    s[0] = vec4(s0[0].x, s1[0].x, s2[0].x, s3[0].x);
    s[1] = vec4(s0[0].y, s1[0].y, s2[0].y, s3[0].y);
    STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, s);

    s[0] = vec4(s0[0].z, s1[0].z, s2[0].z, s3[0].z);
    s[1] = vec4(s0[0].w, s1[0].w, s2[0].w, s3[0].w);
    STORE_PACK8_HALF(dst_ptr, TENSOR_OFFSET_ADVANCE(dst_iter, 1u), s);

    s[0] = vec4(s0[1].x, s1[1].x, s2[1].x, s3[1].x);
    s[1] = vec4(s0[1].y, s1[1].y, s2[1].y, s3[1].y);
    STORE_PACK8_HALF(dst_ptr, TENSOR_OFFSET_ADVANCE(dst_iter, 2u), s);

    s[0] = vec4(s0[1].z, s1[1].z, s2[1].z, s3[1].z);
    s[1] = vec4(s0[1].w, s1[1].w, s2[1].w, s3[1].w);
    STORE_PACK8_HALF(dst_ptr, TENSOR_OFFSET_ADVANCE(dst_iter, 3u), s);
}
#endif /* GEMM_INTERLEAVE4x4 */

#ifdef GEMM_MM_FLOATING_POINT
/** This OpenGL ES kernel computes the matrix multiplication between matrix A (src0) and matrix B (src1)
 * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x4 before running the matrix multiplication
 *
 * @attention The width of matrix B and the value of alpha need to be passed at compile time using WIDTH_MATRIX_B and ALPHA
 *
 * @param[in]  src0_ptr   Pointer to the source matrix. Supported data types: F16
 * @param[in]  src0_attrs The attributes of the source matrix
 * @param[in]  src1_ptr   Pointer to the source matrix. Supported data types: same as @p src0_ptr
 * @param[in]  src1_attrs The attributes of the source matrix
 * @param[out] dst_ptr    Pointer to the destination matrix. Supported data types: same as @p src0_ptr
 * @param[in]  dst_attrs  The attributes of the destination matrix
 */
SHADER_PARAMS_DECLARATION
{
    ImageAttributes src0_attrs;
    ImageAttributes src1_attrs;
    ImageAttributes dst_attrs;
};

#if defined(MM_PROCESS_4X)
TENSOR_DECLARATION(1, src0Buffer, uint, src0_ptr, src0_shift, 2, readonly);
TENSOR_DECLARATION(2, src1Buffer, uvec2, src1_ptr, src1_shift, 3, readonly);
TENSOR_DECLARATION(3, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);

void main()
{
    ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
    ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
    ImageIterator dst_iter  = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);

    int idx = int(gl_GlobalInvocationID.x) * int(NUM_ELEMS_PROCESSED_PER_THREAD_X);
    /* Compute the address for the vector A and matrix B */
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * src0_attrs.stride_y * uint(NUM_ELEMS_PROCESSED_PER_THREAD_Y));
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(idx) * src1_attrs.stride_x);

    /* Compute end row address for matrix A */
    uint end_row_vec_a = uint(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) + uint(COLS_A << 1);
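    /* Note: COLS_A << 1 converts the fp16 element count into bytes (2 bytes per element). */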

    /* Reset accumulators */
    vec4 acc0 = vec4(0.0f);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
    vec4 acc1 = vec4(0.0f);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
    vec4 acc2 = vec4(0.0f);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
    vec4 acc3 = vec4(0.0f);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3

    for(; int(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) <= int(end_row_vec_a - uint(4));
        TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, 2 * 2), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(2) * src1_attrs.stride_y))
    {
        vec2 a0 = LOAD_UNPACK2_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
        vec2 a1 = LOAD_UNPACK2_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
        vec2 a2 = LOAD_UNPACK2_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
        vec2 a3 = LOAD_UNPACK2_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3

        vec4 b0 = LOAD_UNPACK4_CURRENT_ITEM_HALF(src1_ptr, src1_iter);
        vec4 b1 = LOAD_UNPACK4_HALF(src1_ptr, IMAGE_OFFSET(src1_iter, 0, 1));

        acc0 += b0 * vec4(a0.x);
        acc0 += b1 * vec4(a0.y);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
        acc1 += b0 * vec4(a1.x);
        acc1 += b1 * vec4(a1.y);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
        acc2 += b0 * vec4(a2.x);
        acc2 += b1 * vec4(a2.y);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
        acc3 += b0 * vec4(a3.x);
        acc3 += b1 * vec4(a3.y);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
    }

    for(; int(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) < int(end_row_vec_a); TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, 2 * 2), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, src1_attrs.stride_y))
    {
        vec2 a0 = LOAD_UNPACK2_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
        vec2 a1 = LOAD_UNPACK2_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
        vec2 a2 = LOAD_UNPACK2_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
        vec2 a3 = LOAD_UNPACK2_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3

        vec4 b0 = LOAD_UNPACK4_CURRENT_ITEM_HALF(src1_ptr, src1_iter);

        acc0 += b0 * (a0.x);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
        acc1 += b0 * (a1.x);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
        acc2 += b0 * (a2.x);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
        acc3 += b0 * (a3.x);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
    }

    /* Multiply by the weight of vector-matrix product */
    acc0 = acc0 * vec4(ALPHA);

    STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, acc0);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
    STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), acc1);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
    STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), acc2);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
    STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), acc3);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
#elif defined(MM_PROCESS_4X_OPTIMIZED) /* PROCESS_4X */
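/** Same matrix multiplication as the MM_PROCESS_4X variant above (the shader parameters are shared), but matrix A is read as packed uvec4 values, i.e. eight fp16 elements per load. */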
TENSOR_DECLARATION(1, src0Buffer, uvec4, src0_ptr, src0_shift, 4, readonly);
TENSOR_DECLARATION(2, src1Buffer, uvec2, src1_ptr, src1_shift, 3, readonly);
TENSOR_DECLARATION(3, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);

void main()
{
    ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
    ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
    ImageIterator dst_iter  = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);

    int idx = int(gl_GlobalInvocationID.x) * int(NUM_ELEMS_PROCESSED_PER_THREAD_X);
    /* Compute the address for the vector A and matrix B */
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * src0_attrs.stride_y * uint(NUM_ELEMS_PROCESSED_PER_THREAD_Y));
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(idx) * src1_attrs.stride_x);

    /* Compute end row address for matrix A */
    uint end_row_vec_a = uint(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) + uint(COLS_A << 1);

    /* Reset accumulators */
    vec4 acc0 = vec4(0.0f);

#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
    vec4 acc1 = vec4(0.0f);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
    vec4 acc2 = vec4(0.0f);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
    vec4 acc3 = vec4(0.0f);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3

    for(; int(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) <= int(end_row_vec_a - uint(16));
        TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(8) * src0_attrs.stride_x), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(8) * src1_attrs.stride_y))
    {
        vec4 a0[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src0_ptr, src0_iter);

#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
        vec4 a1[2] = LOAD_UNPACK8_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
        vec4 a2[2] = LOAD_UNPACK8_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
        vec4 a3[2] = LOAD_UNPACK8_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3

        vec4 b;

        for(int i = 0; i < 8; i++)
        {
            int j = i >> 2;
            int k = i % 4;
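            /* Note: j selects the low/high half of the unpacked A block and k the lane within it, so a0[j][k] walks the eight fp16 values in order. */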

            b = LOAD_UNPACK4_HALF(src1_ptr, IMAGE_OFFSET(src1_iter, 0, i));

            acc0 += b * vec4(a0[j][k]);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
            acc1 += b * vec4(a1[j][k]);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
            acc2 += b * vec4(a2[j][k]);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
            acc3 += b * vec4(a3[j][k]);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
        }
    }

    for(; int(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) < int(end_row_vec_a); TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, 2 * 8), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(8) * src1_attrs.stride_y))
    {
        vec4 a0[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src0_ptr, src0_iter);

#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
        vec4 a1[2] = LOAD_UNPACK8_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 1));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
        vec4 a2[2] = LOAD_UNPACK8_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 2));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
        vec4 a3[2] = LOAD_UNPACK8_HALF(src0_ptr, IMAGE_OFFSET(src0_iter, 0, 3));
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3

        vec4 b;

        int leftover = COLS_A % 8;

        for(int i = 0; i < leftover; i++)
        {
            int j = i >> 2;
            int k = i % 4;

            b = LOAD_UNPACK4_HALF(src1_ptr, IMAGE_OFFSET(src1_iter, 0, i));

            acc0 += b * vec4(a0[j][k]);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
            acc1 += b * vec4(a1[j][k]);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
            acc2 += b * vec4(a2[j][k]);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
            acc3 += b * vec4(a3[j][k]);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
        }
    }

    /* Multiply by the weight of vector-matrix product */
    acc0 = acc0 * vec4(ALPHA);

    STORE_PACK4_CURRENT_ITEM_HALF(dst_ptr, dst_iter, acc0);
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
    STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), acc1);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 1
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
    STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), acc2);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 2
#if NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
    STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), acc3);
#endif // NUM_ELEMS_PROCESSED_PER_THREAD_Y > 3
}
#elif defined(MM_PROCESS_8X) /* PROCESS_8X */
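/** Same matrix multiplication as the variants above, but each invocation produces eight fp16 outputs; both matrix A and matrix B are read as packed uvec4 values (eight fp16 elements per load). */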
TENSOR_DECLARATION(1, src0Buffer, uvec4, src0_ptr, src0_shift, 4, readonly);
TENSOR_DECLARATION(2, src1Buffer, uvec4, src1_ptr, src1_shift, 4, readonly);
TENSOR_DECLARATION(3, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);

void main()
{
    ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
    ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
    ImageIterator dst_iter  = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);

    int idx = int(gl_GlobalInvocationID.x) * int(NUM_ELEMS_PROCESSED_PER_THREAD_X);
    /* Compute the address for the vector A and matrix B */
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * src0_attrs.stride_y * uint(NUM_ELEMS_PROCESSED_PER_THREAD_Y));
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(idx) * src1_attrs.stride_x);

    /* Compute end row address for matrix A */
    uint end_row_vec_a = uint(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) + uint(COLS_A << 1);

    /* Reset accumulators */
    vec4 acc[2];

    acc[0] = vec4(0.0f);
    acc[1] = vec4(0.0f);

    for(; int(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) <= int(end_row_vec_a - uint(16));
        TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(8) * src0_attrs.stride_x), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(8) * src1_attrs.stride_y))
    {
        vec4 a[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
        vec4 b[2];

        for(int i = 0; i < 8; i++)
        {
            int j = i >> 2;
            int k = i % 4;

            b = LOAD_UNPACK8_HALF(src1_ptr, IMAGE_OFFSET(src1_iter, 0, i));

            acc[0] += b[0] * vec4(a[j][k]);
            acc[1] += b[1] * vec4(a[j][k]);
        }
    }

    for(; int(CURRENT_ITEM_OFFSET_IN_BYTES(src0_iter)) < int(end_row_vec_a);
        TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(8) * uint(2)), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(8) * src1_attrs.stride_y))
    {
        vec4 a[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
        vec4 b[2];

        int leftover = COLS_A % 8;

        for(int i = 0; i < leftover; i++)
        {
            int j = i >> 2;
            int k = i % 4;

            b = LOAD_UNPACK8_HALF(src1_ptr, IMAGE_OFFSET(src1_iter, 0, i));

            acc[0] += b[0] * vec4(a[j][k]);
            acc[1] += b[1] * vec4(a[j][k]);
        }
    }

    /* Multiply by the weight of vector-matrix product */
    acc[0] = acc[0] * vec4(ALPHA);
    acc[1] = acc[1] * vec4(ALPHA);

    STORE_PACK8_CURRENT_ITEM_HALF(dst_ptr, dst_iter, acc);
}
#endif /* PROCESS_8X */
#endif /* GEMM_MM_FLOATING_POINT */

#ifdef GEMM_ACCUMULATE_BIASES
#if defined(ACCUM_PROCESS_4X)
/** This kernel accumulates the biases vector into each row
 *
 * @param[in, out] accum_ptr    Pointer to the accumulate tensor. Supported data type: F16
 * @param[in]      accum_attrs  The attributes of the accumulate tensor
 * @param[in]      biases_ptr   Pointer to the biases vector. Supported data type: same as @p accum_ptr
 * @param[in]      biases_attrs The attributes of the biases tensor
 */
SHADER_PARAMS_DECLARATION
{
    ImageAttributes  accum_attrs;
    VectorAttributes biases_attrs;
};

TENSOR_DECLARATION(1, accumBuffer, uvec2, accum_ptr, accum_shift, 3, restrict);
TENSOR_DECLARATION(2, biasesBuffer, uvec2, biases_ptr, biases_shift, 3, readonly);

void main(void)
{
    ImageIterator  accum_iter  = CONVERT_TO_IMAGE_ITERATOR(accum_attrs, accum_shift);
    VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR(biases_attrs, biases_shift);

    vec4 u[2];
    u[0] = LOAD_UNPACK4_CURRENT_ITEM_HALF(accum_ptr, accum_iter);
    u[1] = LOAD_UNPACK4_CURRENT_ITEM_HALF(biases_ptr, biases_iter);

    vec4 tmp;
    tmp = u[0] + u[1];
    STORE_PACK4_CURRENT_ITEM_HALF(accum_ptr, accum_iter, tmp);
}
#elif defined(ACCUM_PROCESS_8X) /* ACCUM_PROCESS_8X */
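/** Same accumulation as the ACCUM_PROCESS_4X variant above, but eight fp16 values are processed per invocation using packed uvec4 loads and stores. */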
SHADER_PARAMS_DECLARATION
{
    ImageAttributes  accum_attrs;
    VectorAttributes biases_attrs;
};

TENSOR_DECLARATION(1, accumBuffer, uvec4, accum_ptr, accum_shift, 4, restrict);
TENSOR_DECLARATION(2, biasesBuffer, uvec4, biases_ptr, biases_shift, 4, readonly);

void main(void)
{
    ImageIterator  accum_iter  = CONVERT_TO_IMAGE_ITERATOR(accum_attrs, accum_shift);
    VectorIterator biases_iter = CONVERT_TO_VECTOR_ITERATOR(biases_attrs, biases_shift);

    vec4 u[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(accum_ptr, accum_iter);
    vec4 v[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(biases_ptr, biases_iter);

    vec4 r[2];
    r[0] = u[0] + v[0];
    r[1] = u[1] + v[1];
    STORE_PACK8_CURRENT_ITEM_HALF(accum_ptr, accum_iter, r);
}
#endif /* ACCUM_PROCESS_8X */
#endif /* GEMM_ACCUMULATE_BIASES */

#ifdef GEMM_MM_INTERLEAVED_TRANSPOSED
/** This OpenGL ES kernel is optimised for Midgard. It computes the matrix multiplication between matrix A (src0) and matrix B (src1)
 * Matrix A and matrix B must be reshaped respectively with @ref gemm_interleave4x4_16bit and @ref gemm_transpose1x4 before running the matrix multiplication
 *
 * @attention The width of matrix B and the value of alpha need to be passed at compile time using WIDTH_MATRIX_B and ALPHA
 *
 * @param[in]  src0_ptr   Pointer to the source matrix. Supported data types: F16
 * @param[in]  src0_attrs The attributes of the source matrix
 * @param[in]  src1_ptr   Pointer to the source matrix. Supported data types: same as @p src0_ptr
 * @param[in]  src1_attrs The attributes of the source matrix
 * @param[out] dst_ptr    Pointer to the destination matrix. Supported data types: same as @p src0_ptr
 * @param[in]  dst_attrs  The attributes of the destination matrix
 */
SHADER_PARAMS_DECLARATION
{
    ImageAttributes src0_attrs;
    ImageAttributes src1_attrs;
    ImageAttributes dst_attrs;
};
TENSOR_DECLARATION(1, src0Buffer, uvec2, src0_ptr, src0_shift, 3, readonly);
TENSOR_DECLARATION(2, src1Buffer, uvec4, src1_ptr, src1_shift, 4, readonly);
TENSOR_DECLARATION(3, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);

void main()
{
    ImageIterator src0_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src0_attrs, src0_shift);
    ImageIterator src1_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(src1_attrs, src1_shift);
    ImageIterator dst_iter  = CONVERT_TO_IMAGE_ITERATOR(dst_attrs, dst_shift);

    /* Compute address for matrix A and B */
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, uint(gl_GlobalInvocationID.y) * (src0_attrs.stride_y));
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, uint(gl_GlobalInvocationID.x) * (src1_attrs.stride_y));
    /* Compute end row address for matrix B */
    int end_row_mtx_b = (int(CURRENT_ITEM_OFFSET_IN_BYTES(src1_iter)) >> 1) + int(COLS_B);
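    /* Note: the shift by 1 converts the byte offset into fp16 element units, so end_row_mtx_b is measured in elements. */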

    /* Reset accumulators */
    vec4 c00[2];
    vec4 c10[2];
    vec4 c20[2];
    vec4 c30[2];
    c00[0] = vec4(0.0f);
    c00[1] = vec4(0.0f);
    c10[0] = vec4(0.0f);
    c10[1] = vec4(0.0f);
    c20[0] = vec4(0.0f);
    c20[1] = vec4(0.0f);
    c30[0] = vec4(0.0f);
    c30[1] = vec4(0.0f);

    // FIXME: loop unrolling really needed for GLES?
    for(; (int(CURRENT_ITEM_OFFSET_IN_BYTES(src1_iter)) >> 1) <= (end_row_mtx_b - 16); TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, 16), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, 32))
    {
        /* Load values from matrix A (interleaved) and matrix B (transposed) */
        vec4 a0    = LOAD_UNPACK4_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
        vec4 b0[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src1_ptr, src1_iter);

        c00[0] += vec4(a0.x) * b0[0];
        c00[1] += vec4(a0.x) * b0[1];
        c10[0] += vec4(a0.y) * b0[0];
        c10[1] += vec4(a0.y) * b0[1];
        c20[0] += vec4(a0.z) * b0[0];
        c20[1] += vec4(a0.z) * b0[1];
        c30[0] += vec4(a0.w) * b0[0];
        c30[1] += vec4(a0.w) * b0[1];

        /* Load values from matrix A (interleaved) and matrix B (transposed) */
        a0 = LOAD_UNPACK4_HALF(src0_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src0_iter, 8));
        b0 = LOAD_UNPACK8_HALF(src1_ptr, TENSOR_OFFSET_ADVANCE_IN_BYTES(src1_iter, 16));

        c00[0] += vec4(a0.x) * b0[0];
        c00[1] += vec4(a0.x) * b0[1];
        c10[0] += vec4(a0.y) * b0[0];
        c10[1] += vec4(a0.y) * b0[1];
        c20[0] += vec4(a0.z) * b0[0];
        c20[1] += vec4(a0.z) * b0[1];
        c30[0] += vec4(a0.w) * b0[0];
        c30[1] += vec4(a0.w) * b0[1];
    }

    for(; (int(CURRENT_ITEM_OFFSET_IN_BYTES(src1_iter)) >> 1) < end_row_mtx_b; TENSOR_ITERATOR_ADVANCE_IN_BYTES(src0_iter, 8), TENSOR_ITERATOR_ADVANCE_IN_BYTES(src1_iter, 16))
    {
        /* Load values from matrix A (interleaved) and matrix B (transposed) */
        vec4 a0    = LOAD_UNPACK4_CURRENT_ITEM_HALF(src0_ptr, src0_iter);
        vec4 b0[2] = LOAD_UNPACK8_CURRENT_ITEM_HALF(src1_ptr, src1_iter);

        c00[0] += vec4(a0.x) * b0[0];
        c00[1] += vec4(a0.x) * b0[1];
        c10[0] += vec4(a0.y) * b0[0];
        c10[1] += vec4(a0.y) * b0[1];
        c20[0] += vec4(a0.z) * b0[0];
        c20[1] += vec4(a0.z) * b0[1];
        c30[0] += vec4(a0.w) * b0[0];
        c30[1] += vec4(a0.w) * b0[1];
    }

    /* Multiply by the weight of matrix product */
    c00[0] = c00[0] * vec4(ALPHA);
    c00[1] = c00[1] * vec4(ALPHA);
    c10[0] = c10[0] * vec4(ALPHA);
    c10[1] = c10[1] * vec4(ALPHA);
    c20[0] = c20[0] * vec4(ALPHA);
    c20[1] = c20[1] * vec4(ALPHA);
    c30[0] = c30[0] * vec4(ALPHA);
    c30[1] = c30[1] * vec4(ALPHA);

    /* Store 4x8 block */
    STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 0), c00);
    STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), c10);
    STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), c20);
    STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), c30);
}
#endif /* GEMM_MM_INTERLEAVED_TRANSPOSED */
#else /* DATA_TYPE_FP16 */
#error Data type not supported
#endif /* DATA_TYPE_FP32 */