/*
 * Copyright (c) 2017, 2018 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
// Work-group size is injected at compile time by the build system.
layout(local_size_x = LOCAL_SIZE_X, local_size_y = LOCAL_SIZE_Y, local_size_z = LOCAL_SIZE_Z) in;

#include "helpers_cs.h"

#if defined(DATA_TYPE_FP16)
precision mediump float;
#endif // DATA_TYPE_FP16

/* Swap two vec4 rows (u0 <-> l0).
 * NOTE: relies on a 'tmp_swap' variable declared by the enclosing scope
 * (see SWAP_4x4_func) to hold the temporary value. */
#define SWAP_ROW_func(u0, l0) \
    {                         \
        tmp_swap = u0;        \
        u0       = l0;        \
        l0       = tmp_swap;  \
    }

/* Swap two 4x4 sub-blocks: rows (u0..u3) <-> rows (l0..l3).
 * Declares the 'tmp_swap' temporary required by SWAP_ROW_func. */
#define SWAP_4x4_func(u0, u1, u2, u3, l0, l1, l2, l3) \
    {                                                 \
        vec4 tmp_swap;                                \
        SWAP_ROW_func(u0, l0);                        \
        SWAP_ROW_func(u1, l1);                        \
        SWAP_ROW_func(u2, l2);                        \
        SWAP_ROW_func(u3, l3);                        \
    }

/* Transpose, in place, a 4x4 block held in four vec4 rows (u0..u3),
 * using the built-in mat4 transpose(). */
#define TRANSPOSE_4x4_func(u0, u1, u2, u3) \
    {                                      \
        mat4x4 matin, matout;              \
        matin[0] = u0;                     \
        matin[1] = u1;                     \
        matin[2] = u2;                     \
        matin[3] = u3;                     \
        matout   = transpose(matin);       \
        u0       = matout[0];              \
        u1       = matout[1];              \
        u2       = matout[2];              \
        u3       = matout[3];              \
    }

/** This OpenGL ES kernel computes the matrix transposition of input matrix
 *
 * @note The data type must be passed at compile time using "#define DATA_TYPE_NAME". e.g. "#define DATA_TYPE_FP32"
 * @note Optimization name must be passed using "#define OPTIMIZATION_NAME" for F16. e.g. "#define TRANSPOSE_8X8"
 *
 * @param[in]  src_ptr   Pointer to the source matrix. Supported data types: F32/F16
 * @param[in]  src_attrs The attributes of the source matrix
 * @param[out] dst_ptr   Pointer to the destination matrix. Supported data type: same as src_ptr
 * @param[in]  dst_attrs The attributes of the destination matrix
 */
SHADER_PARAMS_DECLARATION
{
    ImageAttributes src_attrs;
    ImageAttributes dst_attrs;
};

#ifdef DATA_TYPE_FP32
TENSOR_DECLARATION(1, srcBuffer, float, src_ptr, src_shift, 2, readonly);
TENSOR_DECLARATION(2, dstBuffer, float, dst_ptr, dst_shift, 2, writeonly);

/* F32 variant: each invocation transposes one 4x4 block of floats. */
void main(void)
{
    // compute source address
    ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
    ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);

    // load the NxN block at (x, y)
    vec4 u0 = VLOAD4(vec4, src_ptr, IMAGE_OFFSET(src_iter, 0, 0));
    vec4 u1 = VLOAD4(vec4, src_ptr, IMAGE_OFFSET(src_iter, 0, 1));
    vec4 u2 = VLOAD4(vec4, src_ptr, IMAGE_OFFSET(src_iter, 0, 2));
    vec4 u3 = VLOAD4(vec4, src_ptr, IMAGE_OFFSET(src_iter, 0, 3));

    // transpose the block
    TRANSPOSE_4x4_func(u0, u1, u2, u3);

    // store the block at the mirrored position (y, x):
    // 16 bytes = one vec4 of F32 along x, 4 rows of stride_y along y
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, uint(16) * uint(gl_GlobalInvocationID.y) + uint(4) * uint(gl_GlobalInvocationID.x) * (dst_attrs.stride_y));

    VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 0), u0);
    VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), u1);
    VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), u2);
    VSTORE4(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), u3);
}

#elif defined(DATA_TYPE_FP16) /* DATA_TYPE_FP16 */

#if defined(TRANSPOSE_4X4)
TENSOR_DECLARATION(1, srcBuffer, uvec2, src_ptr, src_shift, 3, readonly);
TENSOR_DECLARATION(2, dstBuffer, uvec2, dst_ptr, dst_shift, 3, writeonly);

/* F16 variant: each invocation transposes one 4x4 block of halves,
 * packed/unpacked through uvec2 (4 x 16-bit). */
void main(void)
{
    // compute source address
    ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
    ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);

    // load the NxN block at (x, y)
    vec4 u0 = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 0));
    vec4 u1 = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 1));
    vec4 u2 = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 2));
    vec4 u3 = LOAD_UNPACK4_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, 3));

    // transpose the block
    TRANSPOSE_4x4_func(u0, u1, u2, u3);

    // store the block at the mirrored position (y, x):
    // 8 bytes = four F16 values along x, step_y rows along y
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, uint(8) * uint(gl_GlobalInvocationID.y) + uint(gl_GlobalInvocationID.x) * (dst_attrs.step_y));

    STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 0), u0);
    STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 1), u1);
    STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 2), u2);
    STORE_PACK4_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, 3), u3);
}

#elif defined(TRANSPOSE_8X8) /* TRANSPOSE_8X8 */
TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);

/* F16 optimized variant: each invocation transposes one 8x8 block of halves.
 * Each of the 8 rows is held as two vec4 halves (u[i][0] = left 4 columns,
 * u[i][1] = right 4 columns). */
void main(void)
{
    // compute source address
    ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
    ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);

    vec4 u[8][2];

    for(int i = 0; i < 8; i++)
    {
        u[i] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, i));
    }

    // transpose the block: transpose each 4x4 quadrant in place,
    // then swap the off-diagonal quadrants
    TRANSPOSE_4x4_func(u[0][0], u[1][0], u[2][0], u[3][0]);
    TRANSPOSE_4x4_func(u[0][1], u[1][1], u[2][1], u[3][1]);
    TRANSPOSE_4x4_func(u[4][0], u[5][0], u[6][0], u[7][0]);
    TRANSPOSE_4x4_func(u[4][1], u[5][1], u[6][1], u[7][1]);
    SWAP_4x4_func(u[0][1], u[1][1], u[2][1], u[3][1], u[4][0], u[5][0], u[6][0], u[7][0]);

    // store the block at the mirrored position (y, x):
    // 16 bytes = eight F16 values along x, step_y rows along y
    TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, uint(16) * uint(gl_GlobalInvocationID.y) + uint(gl_GlobalInvocationID.x) * (dst_attrs.step_y));

    for(int i = 0; i < 8; i++)
    {
        STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, i), u[i]);
    }
}

#elif defined(TRANSPOSE_8X8_SQUARE) /* TRANSPOSE_8X8_SQUARE */
TENSOR_DECLARATION(1, srcBuffer, uvec4, src_ptr, src_shift, 4, readonly);
TENSOR_DECLARATION(2, dstBuffer, uvec4, dst_ptr, dst_shift, 4, writeonly);

/* F16 optimized variant for square matrices: each invocation on or below the
 * diagonal transposes a mirrored pair of 8x8 blocks and writes each one to
 * the other's position, halving the number of dispatched invocations that do
 * work (invocations above the diagonal exit immediately). */
void main(void)
{
    ImageIterator src_iter = CONVERT_TO_IMAGE_ITERATOR(src_attrs, src_shift);
    ImageIterator dst_iter = CONVERT_TO_IMAGE_ITERATOR_NO_STEP(dst_attrs, dst_shift);

    if(gl_GlobalInvocationID.x <= gl_GlobalInvocationID.y)
    {
        // blk1 is this invocation's block; blk2 is the mirrored block at (y, x)
        uint blk1_offset_in_bytes = CURRENT_ITEM_OFFSET_IN_BYTES(src_iter);
        TENSOR_ITERATOR_ADVANCE_IN_BYTES(dst_iter, uint(16) * uint(gl_GlobalInvocationID.y) + uint(gl_GlobalInvocationID.x) * (dst_attrs.step_y));
        uint blk2_offset_in_bytes = CURRENT_ITEM_OFFSET_IN_BYTES(dst_iter);

        // load block1
        vec4 u1[8][2];

        SET_TENSOR_ITERATOR_OFFSET_IN_BYTES(src_iter, blk1_offset_in_bytes);
        for(int i = 0; i < 8; i++)
        {
            u1[i] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, i));
        }

        // transpose block1 (per-quadrant transpose + off-diagonal swap)
        TRANSPOSE_4x4_func(u1[0][0], u1[1][0], u1[2][0], u1[3][0]);
        TRANSPOSE_4x4_func(u1[0][1], u1[1][1], u1[2][1], u1[3][1]);
        TRANSPOSE_4x4_func(u1[4][0], u1[5][0], u1[6][0], u1[7][0]);
        TRANSPOSE_4x4_func(u1[4][1], u1[5][1], u1[6][1], u1[7][1]);
        SWAP_4x4_func(u1[0][1], u1[1][1], u1[2][1], u1[3][1], u1[4][0], u1[5][0], u1[6][0], u1[7][0]);

        // write to block2
        SET_TENSOR_ITERATOR_OFFSET_IN_BYTES(dst_iter, blk2_offset_in_bytes);
        for(int i = 0; i < 8; i++)
        {
            STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, i), u1[i]);
        }

        // load block2
        vec4 u2[8][2];

        SET_TENSOR_ITERATOR_OFFSET_IN_BYTES(src_iter, blk2_offset_in_bytes);
        for(int i = 0; i < 8; i++)
        {
            u2[i] = LOAD_UNPACK8_HALF(src_ptr, IMAGE_OFFSET(src_iter, 0, i));
        }

        // transpose block2
        TRANSPOSE_4x4_func(u2[0][0], u2[1][0], u2[2][0], u2[3][0]);
        TRANSPOSE_4x4_func(u2[0][1], u2[1][1], u2[2][1], u2[3][1]);
        TRANSPOSE_4x4_func(u2[4][0], u2[5][0], u2[6][0], u2[7][0]);
        TRANSPOSE_4x4_func(u2[4][1], u2[5][1], u2[6][1], u2[7][1]);
        SWAP_4x4_func(u2[0][1], u2[1][1], u2[2][1], u2[3][1], u2[4][0], u2[5][0], u2[6][0], u2[7][0]);

        // write to block1
        SET_TENSOR_ITERATOR_OFFSET_IN_BYTES(dst_iter, blk1_offset_in_bytes);
        for(int i = 0; i < 8; i++)
        {
            STORE_PACK8_HALF(dst_ptr, IMAGE_OFFSET(dst_iter, 0, i), u2[i]);
        }
    }
}

#endif /* TRANSPOSE_4X4 */

#endif /* DATA_TYPE_FP32 */