blob: f975bfb1962b189832877ff89b6f729f4f0b85b0 [file] [log] [blame]
Manuel Bottini10c53f12019-07-17 16:11:53 +01001/*
Michele Di Giorgiod9eaf612020-07-08 11:12:57 +01002 * Copyright (c) 2019 Arm Limited.
Manuel Bottini10c53f12019-07-17 16:11:53 +01003 *
4 * SPDX-License-Identifier: MIT
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in all
14 * copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24#include "arm_compute/runtime/CL/functions/CLLSTMLayerQuantized.h"
25
26#include "tests/CL/CLAccessor.h"
27#include "tests/PaddingCalculator.h"
28#include "tests/Utils.h"
29#include "tests/datasets/LSTMLayerDataset.h"
30#include "tests/framework/Asserts.h"
31#include "tests/framework/Macros.h"
32#include "tests/framework/datasets/Datasets.h"
33#include "tests/validation/Validation.h"
34
#include <cstring>
35#include <vector>
36
37namespace arm_compute
38{
39namespace test
40{
41namespace validation
42{
43namespace
44{
45template <typename T>
46inline void fill_tensor(CLTensor &tensor, const std::vector<T> &v)
47{
48 tensor.map(true);
49 // Import memory accounting for padding
50 TensorShape t_shape = tensor.info()->tensor_shape();
51 Window window;
52 window.use_tensor_dimensions(t_shape);
53 Iterator out(&tensor, window);
54 execute_window_loop(window, [&](const Coordinates & id)
55 {
56 *reinterpret_cast<T *>(out.ptr()) = v[coord2index(t_shape, id)];
57 },
58 out);
59 tensor.unmap();
60}
61
62template <typename T>
63inline void fill_tensor(SimpleTensor<T> &tensor, const std::vector<T> &v)
64{
65 std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
66}
67
68} // namespace
69
70TEST_SUITE(CL)
71TEST_SUITE(LSTMLayerQuantized)
72
73// *INDENT-OFF*
74// clang-format off
Manuel Bottini07263982019-10-17 18:37:26 +010075TEST_SUITE(IntegrationTestCase)
76TEST_SUITE(MultSmallerEq1)
// Integration test: a single quantized LSTM cell (batch 2, input 2, output 4)
// run for three consecutive time steps; the QASYMM8 output state is validated
// against precomputed reference values after every step.
// NOTE(review): the enclosing suite name indicates these scales keep the
// effective internal multiplier <= 1 — confirm against the layer's validate().
77TEST_CASE(RunSmall, framework::DatasetMode::PRECOMMIT)
Manuel Bottini10c53f12019-07-17 16:11:53 +010078{
79 const int batch_size = 2;
80 const int input_size = 2;
81 const int output_size = 4;
82
    // Quantization parameters: 8-bit asymmetric for activations/weights,
    // 16-bit symmetric (qsymm_3/qsymm_4) for the internal and cell state.
Manuel Bottini10c53f12019-07-17 16:11:53 +010083 QuantizationInfo qasymm(1.f / 128.f, 128);
84 QuantizationInfo qweights(1.f / 128.f, 128);
85 QuantizationInfo qsymm_3(8.f / 32768.f, 0);
86 QuantizationInfo qsymm_4(16.f / 32768.f, 0);
87
88 TensorShape input_shape{ input_size, batch_size };
89 TensorShape input_weights_shape{ input_size, output_size };
90 TensorShape recurrent_weights_shape{ output_size, output_size };
91 TensorShape output_shape{ output_size, batch_size};
92 TensorShape bias_shape{ output_size };
93
94 auto input_to_input_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
95 auto input_to_forget_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
96 auto input_to_cell_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
97 auto input_to_output_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
98 auto recurrent_to_input_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
99 auto recurrent_to_forget_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
100 auto recurrent_to_cell_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
101 auto recurrent_to_output_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
102 auto input_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
103 auto forget_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
104 auto cell_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
105 auto output_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
106
107 // LSTM input
108 auto input = create_tensor<CLTensor>(input_shape, DataType::QASYMM8, 1, qasymm);
109
110 // LSTM output state
111 auto output_state = create_tensor<CLTensor>(output_shape, DataType::QASYMM8, 1, qasymm);
112
113 // LSTM cell state
114 auto cell_state = create_tensor<CLTensor>(output_shape, DataType::QSYMM16, 1, qsymm_4);
115
116 CLLSTMLayerQuantized lstmq;
117
    // cell_state/output_state are passed twice: they act as both the previous
    // ("in") and updated ("out") state tensors, so state persists across run()s.
118 lstmq.configure(&input, &input_to_input_weights, &input_to_forget_weights, &input_to_cell_weights, &input_to_output_weights,
119 &recurrent_to_input_weights, &recurrent_to_forget_weights, &recurrent_to_cell_weights, &recurrent_to_output_weights,
120 &input_gate_bias, &forget_gate_bias, &cell_gate_bias, &output_gate_bias, &cell_state, &output_state, &cell_state, &output_state);
121
122 input.allocator()->allocate();
123 input_to_input_weights.allocator()->allocate();
124 input_to_forget_weights.allocator()->allocate();
125 input_to_cell_weights.allocator()->allocate();
126 input_to_output_weights.allocator()->allocate();
127 recurrent_to_input_weights.allocator()->allocate();
128 recurrent_to_forget_weights.allocator()->allocate();
129 recurrent_to_cell_weights.allocator()->allocate();
130 recurrent_to_output_weights.allocator()->allocate();
131 input_gate_bias.allocator()->allocate();
132 forget_gate_bias.allocator()->allocate();
133 cell_gate_bias.allocator()->allocate();
134 output_gate_bias.allocator()->allocate();
135 cell_state.allocator()->allocate();
136 output_state.allocator()->allocate();
137
138 // Fill weights and biases
139 fill_tensor(input_to_input_weights, std::vector<uint8_t>{ 47, 168,
140 66, 239,
141 6, 42,
142 237, 236 });
143
144 fill_tensor(input_to_forget_weights, std::vector<uint8_t> { 204, 193,
145 148, 59,
146 113, 17,
147 66, 197 });
148
149 fill_tensor(input_to_cell_weights, std::vector<uint8_t> { 172, 101,
150 184, 209,
151 165, 82,
152 108, 209 });
153
154 fill_tensor(input_to_output_weights, std::vector<uint8_t> { 203, 244,
155 219, 114,
156 130, 16,
157 163, 222 });
158
159 fill_tensor(recurrent_to_input_weights, std::vector<uint8_t> { 162, 168, 7, 95,
160 91, 155, 108, 216,
161 255, 100, 48, 188,
162 58, 37, 186, 147 });
163
164 fill_tensor(recurrent_to_forget_weights, std::vector<uint8_t> { 46, 58, 47, 170,
165 246, 96, 12, 99,
166 68, 23, 186, 161,
167 237, 164, 89, 6 });
168
169 fill_tensor(recurrent_to_cell_weights, std::vector<uint8_t> { 234, 99, 71, 206,
170 205, 159, 64, 253,
171 191, 148, 116, 8,
172 209, 136, 59, 138 });
173
174 fill_tensor(recurrent_to_output_weights, std::vector<uint8_t> { 23, 241, 137, 36,
175 206, 5, 227, 56,
176 254, 176, 231, 47,
177 18, 201, 161, 11 });
178
179 fill_tensor(input_gate_bias, std::vector<int> {-103038, 30525, 115255, -38154 });
180 fill_tensor(forget_gate_bias, std::vector<int> { -23428, 126970, 116806, 46307 });
181 fill_tensor(cell_gate_bias, std::vector<int> { 128006, 69949, -42808, 42568 });
182 fill_tensor(output_gate_bias, std::vector<int> { -67066, -53607, 47233, 7300 });
183
184 SimpleTensor<uint8_t> expected_output(output_shape, DataType::QASYMM8, 1, qasymm);
185
    // Initial state: output_state at the zero-point (128), cell_state at 0.
186 // Initialize state
187 fill_tensor(output_state, std::vector<uint8_t> { 128, 128, 128, 128,
188 128, 128, 128, 128 });
189 fill_tensor(cell_state, std::vector<int16_t> { 0, 0, 0, 0,
190 0, 0, 0, 0 });
191
192 // First input
193 fill_tensor(input, std::vector<uint8_t> { 106, 193,
194 155, 150 });
195
196 fill_tensor(expected_output, std::vector<uint8_t> { 128, 130, 36, 134,
197 128, 131, 35, 133 });
198
199 lstmq.run();
200 validate(CLAccessor(output_state), expected_output);
201
    // The input tensor is NOT refilled: steps 2 and 3 reuse the same input
    // values, so only the evolving recurrent state changes the output.
202 // Second input
203 fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 12, 137,
204 128, 131, 10, 136 });
205 lstmq.run();
206 validate(CLAccessor(output_state), expected_output);
207
208 // Third input
209 fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 8, 140,
210 128, 130, 6, 138 });
211 lstmq.run();
212 validate(CLAccessor(output_state), expected_output);
213}
214
// Same integration scenario scaled up: batch 16, input 8, output 8, run for two
// time steps with the output state validated after each run().
Manuel Bottini07263982019-10-17 18:37:26 +0100215TEST_CASE(RunLarge, framework::DatasetMode::PRECOMMIT)
Manuel Bottini10c53f12019-07-17 16:11:53 +0100216{
217 const int batch_size = 16;
218 const int input_size = 8;
219 const int output_size = 8;
220
221
    // Quantization parameters (same as RunSmall): 8-bit asymmetric for
    // activations/weights, 16-bit symmetric for internal and cell state.
222 QuantizationInfo qasymm(1.f / 128.f, 128);
223 QuantizationInfo qweights(1.f / 128.f, 128);
224 QuantizationInfo qsymm_3(8.f / 32768.f, 0);
225 QuantizationInfo qsymm_4(16.f / 32768.f, 0);
226
227 TensorShape input_shape{ input_size, batch_size };
228 TensorShape input_weights_shape{ input_size, output_size };
229 TensorShape recurrent_weights_shape{ output_size, output_size };
230 TensorShape output_shape{ output_size, batch_size};
231 TensorShape bias_shape{ output_size };
232
233 auto input_to_input_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
234 auto input_to_forget_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
235 auto input_to_cell_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
236 auto input_to_output_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
237 auto recurrent_to_input_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
238 auto recurrent_to_forget_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
239 auto recurrent_to_cell_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
240 auto recurrent_to_output_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
241 auto input_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
242 auto forget_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
243 auto cell_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
244 auto output_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
245
246 // LSTM input
247 auto input = create_tensor<CLTensor>(input_shape, DataType::QASYMM8, 1, qasymm);
248
249 // LSTM output state
250 auto output_state = create_tensor<CLTensor>(output_shape, DataType::QASYMM8, 1, qasymm);
251
252 // LSTM cell state
253 auto cell_state = create_tensor<CLTensor>(output_shape, DataType::QSYMM16, 1, qsymm_4);
254
255 CLLSTMLayerQuantized lstmq;
256
    // cell_state/output_state serve as both previous and updated state tensors,
    // so the recurrent state carries over between run() calls.
257 lstmq.configure(&input, &input_to_input_weights, &input_to_forget_weights, &input_to_cell_weights, &input_to_output_weights,
258 &recurrent_to_input_weights, &recurrent_to_forget_weights, &recurrent_to_cell_weights, &recurrent_to_output_weights,
259 &input_gate_bias, &forget_gate_bias, &cell_gate_bias, &output_gate_bias, &cell_state, &output_state, &cell_state, &output_state);
260
261 input.allocator()->allocate();
262 input_to_input_weights.allocator()->allocate();
263 input_to_forget_weights.allocator()->allocate();
264 input_to_cell_weights.allocator()->allocate();
265 input_to_output_weights.allocator()->allocate();
266 recurrent_to_input_weights.allocator()->allocate();
267 recurrent_to_forget_weights.allocator()->allocate();
268 recurrent_to_cell_weights.allocator()->allocate();
269 recurrent_to_output_weights.allocator()->allocate();
270 input_gate_bias.allocator()->allocate();
271 forget_gate_bias.allocator()->allocate();
272 cell_gate_bias.allocator()->allocate();
273 output_gate_bias.allocator()->allocate();
274 cell_state.allocator()->allocate();
275 output_state.allocator()->allocate();
276
277 // Fill weights and biases
278 fill_tensor(input_to_input_weights, std::vector<uint8_t>{ 141, 89, 200, 180, 46, 50, 87, 128,
279 149, 227, 177, 187, 212, 229, 54, 111,
280 131, 116, 3, 58, 196, 26, 131, 255,
281 22, 106, 216, 69, 239, 12, 232, 207,
282 184, 56, 236, 172, 28, 143, 161, 124,
283 255, 33, 197, 122, 47, 197, 26, 229,
284 91, 79, 11, 160, 26, 80, 100, 36,
285 248, 186, 97, 61, 125, 46, 14, 100, });
286
287 fill_tensor(input_to_forget_weights, std::vector<uint8_t> { 237, 165, 141, 249, 72, 116, 36 , 115,
288 234, 213, 85, 84, 59, 62, 150, 246,
289 182, 102, 158, 214, 182, 183, 94, 11,
290 158, 192, 92, 189, 160, 219, 206, 249,
291 88, 213, 193, 244, 151, 72, 129, 49,
292 239, 83, 106, 9, 169, 187, 125, 171,
293 32, 141, 126, 92, 13, 36, 224, 150,
294 187, 250, 178, 169, 89, 214, 91, 173 });
295
296 fill_tensor(input_to_cell_weights, std::vector<uint8_t> { 93, 103, 226, 139, 185, 252, 129, 171,
297 159, 32, 25, 175, 224, 183, 165, 35,
298 207, 69, 238, 228, 149, 214, 79, 6,
299 5, 66, 102, 14, 19, 111, 36, 143,
300 22, 85, 13, 78, 236, 121, 122, 77,
301 249, 39, 88, 12, 205, 143, 93, 240,
302 167, 89, 188, 50, 73, 69, 201, 251,
303 59, 32, 203, 184, 139, 191, 199, 74});
304
305 fill_tensor(input_to_output_weights, std::vector<uint8_t> { 205, 7, 95, 104, 252, 143, 226, 73,
306 229, 114, 152, 171, 221, 153, 73, 229,
307 153, 165, 223, 239, 100, 38, 172, 211,
308 226, 133, 239, 207, 116, 230, 170, 100,
309 241, 95, 171, 124, 63, 115, 32, 127,
310 141, 239, 53, 193, 201, 53, 104, 178,
311 186, 212, 167, 107, 226, 230, 71, 213,
312 148, 217, 19, 248, 233, 195, 183, 156 });
313
314 fill_tensor(recurrent_to_input_weights, std::vector<uint8_t> { 147, 112, 140, 103, 3, 255, 17, 49,
315 84, 112, 144, 213, 138, 142, 112, 66,
316 117, 30, 101, 35, 25, 132, 211, 229,
317 183, 208, 102, 16, 38, 85, 101, 152,
318 226, 83, 132, 22, 161, 110, 157, 129,
319 184, 63, 168, 42, 220, 126, 209, 157,
320 5, 88, 243, 83, 249, 19, 226, 209,
321 173, 96, 185, 77, 146, 227, 238, 136 });
322
323
324 fill_tensor(recurrent_to_forget_weights, std::vector<uint8_t> { 52, 132, 92, 200, 213, 32, 213, 37,
325 116, 142, 116, 180, 4, 172, 158, 143,
326 110, 40, 99, 28, 221, 153, 133, 2,
327 247, 144, 198, 100, 20, 15, 221, 196,
328 159, 178, 188, 151, 171, 15, 25, 217,
329 178, 109, 110, 118, 128, 39, 232, 234,
330 184, 214, 177, 13, 56, 6, 28, 252,
331 89, 187, 242, 59, 146, 111, 132, 129});
332
333 fill_tensor(recurrent_to_cell_weights, std::vector<uint8_t> { 70, 44, 137, 29, 36, 127, 1, 241,
334 26, 241, 142, 114, 67, 181, 49, 57,
335 131, 152, 175, 77, 23, 63, 37, 124,
336 150, 113, 95, 103, 110, 201, 69, 97,
337 196, 242, 62, 214, 66, 19, 45, 135,
338 22, 168, 149, 104, 77, 101, 36, 68,
339 170, 116, 222, 100, 109, 1, 154, 18,
340 133, 215, 105, 93, 31, 57, 231, 112 });
341
342
343 fill_tensor(recurrent_to_output_weights, std::vector<uint8_t> { 45 , 181 , 220 , 219 , 49 , 63 , 49 , 129,
344 7 , 166 , 104 , 114 , 83 , 40 , 1 , 195,
345 245 , 142 , 82 , 232 , 104 , 245 , 82 , 196,
346 111 , 56 , 156 , 9 , 141 , 240 , 180 , 148,
347 247 , 198 , 234 , 137 , 13 , 210 , 161 , 192,
348 196 , 59 , 233 , 184 , 142 , 187 , 140 , 166,
349 2 , 95 , 152 , 46 , 71 , 46 , 113 , 32,
350 175 , 229 , 86 , 87 , 62 , 93 , 74 , 130});
351
352 fill_tensor(input_gate_bias, std::vector<int> { -40040, -106916, -92315, -79123, 45160, -17954, 50962, -63758 });
353 fill_tensor(forget_gate_bias, std::vector<int> { -128514, 8463, -57831, 116977, 106547, -28132, -124557, 44941 });
354 fill_tensor(cell_gate_bias, std::vector<int> { 88388 , 123601, -116148, -13022, 21619, 48926, 57523, 39332 });
355 fill_tensor(output_gate_bias, std::vector<int> { 59485 , -33070, 21386, -100633, -115959, 125768, -56407, 24897 });
356
357 SimpleTensor<uint8_t> expected_output(output_shape, DataType::QASYMM8, 1, qasymm);
358
    // Initial state: output_state at the zero-point (128), cell_state at 0.
359 // Initialize state
360 fill_tensor(output_state, std::vector<uint8_t> { 128, 128, 128, 128, 128, 128, 128, 128,
361 128, 128, 128, 128, 128, 128, 128, 128,
362 128, 128, 128, 128, 128, 128, 128, 128,
363 128, 128, 128, 128, 128, 128, 128, 128,
364 128, 128, 128, 128, 128, 128, 128, 128,
365 128, 128, 128, 128, 128, 128, 128, 128,
366 128, 128, 128, 128, 128, 128, 128, 128,
367 128, 128, 128, 128, 128, 128, 128, 128,
368 128, 128, 128, 128, 128, 128, 128, 128,
369 128, 128, 128, 128, 128, 128, 128, 128,
370 128, 128, 128, 128, 128, 128, 128, 128,
371 128, 128, 128, 128, 128, 128, 128, 128,
372 128, 128, 128, 128, 128, 128, 128, 128,
373 128, 128, 128, 128, 128, 128, 128, 128,
374 128, 128, 128, 128, 128, 128, 128, 128,
375 128, 128, 128, 128, 128, 128, 128, 128 });
376
377 fill_tensor(cell_state, std::vector<int16_t> { 0, 0, 0, 0, 0, 0, 0, 0,
378 0, 0, 0, 0, 0, 0, 0, 0,
379 0, 0, 0, 0, 0, 0, 0, 0,
380 0, 0, 0, 0, 0, 0, 0, 0,
381 0, 0, 0, 0, 0, 0, 0, 0,
382 0, 0, 0, 0, 0, 0, 0, 0,
383 0, 0, 0, 0, 0, 0, 0, 0,
384 0, 0, 0, 0, 0, 0, 0, 0,
385 0, 0, 0, 0, 0, 0, 0, 0,
386 0, 0, 0, 0, 0, 0, 0, 0,
387 0, 0, 0, 0, 0, 0, 0, 0,
388 0, 0, 0, 0, 0, 0, 0, 0,
389 0, 0, 0, 0, 0, 0, 0, 0,
390 0, 0, 0, 0, 0, 0, 0, 0,
391 0, 0, 0, 0, 0, 0, 0, 0,
392 0, 0, 0, 0, 0, 0, 0, 0});
393
394 // First input
395 fill_tensor(input, std::vector<uint8_t> { 247, 203, 159, 131, 182, 114, 207, 195,
396 48 , 61 , 154, 16, 80, 101, 116, 255,
397 50 , 115 , 45, 186, 75, 212, 98, 48,
398 88 , 146 , 24, 143, 218, 174, 203, 200,
399 239 , 16 , 66, 136, 234, 54, 94, 51,
400 101 , 128 , 220, 213, 164, 82, 137, 255,
401 70 , 165 , 234, 220, 66, 35, 183, 206,
402 39 , 57 , 180, 202, 23, 172, 224, 109,
403 102 , 215 , 186, 82, 215, 147, 85, 187,
404 96 , 249 , 59, 116, 150, 44, 167, 128,
405 34 , 217 , 148, 193, 243, 38, 250, 208,
406 112 , 130 , 208, 29, 16, 122, 20, 92,
407 24 , 72 , 104, 29, 150, 233, 151, 19,
408 158 , 192 , 254, 70, 73, 142, 106, 152,
409 3 , 61 , 24, 135, 212, 9, 80, 234,
410 147 , 246 , 83, 249, 49, 14, 68, 50});
411
412 fill_tensor(expected_output, std::vector<uint8_t> {131, 128, 128, 128, 128, 180, 129, 133,
413 136, 128, 126, 128, 128, 173, 135, 130,
414 160, 128, 128, 128, 128, 138, 132, 129,
415 131, 128, 127, 128, 128, 169, 129, 131,
416 133, 128, 128, 128, 128, 182, 130, 129,
417 131, 128, 128, 128, 128, 163, 129, 130,
418 131, 128, 128, 128, 128, 149, 132, 129,
419 143, 128, 127, 128, 128, 150, 134, 131,
420 134, 128, 128, 128, 128, 167, 130, 130,
421 131, 128, 128, 128, 128, 152, 132, 129,
422 128, 128, 128, 128, 128, 169, 130, 130,
423 173, 128, 128, 128, 128, 148, 139, 130,
424 152, 128, 128, 128, 128, 168, 139, 132,
425 147, 128, 128, 128, 128, 161, 131, 132,
426 130, 128, 128, 128, 128, 159, 134, 128,
427 140, 128, 128, 128, 128, 133, 132, 128 });
428
429 lstmq.run();
430 validate(CLAccessor(output_state), expected_output);
431
    // The input tensor is NOT refilled: step 2 reuses the same input values,
    // so only the evolving recurrent state changes the output.
432 // Second input
433 fill_tensor(expected_output, std::vector<uint8_t> { 130, 128, 128, 128, 128, 205, 129, 137,
434 135, 128, 127, 128, 128, 190, 137, 132,
435 160, 128, 128, 128, 128, 142, 133, 131,
436 130, 128, 128, 128, 128, 185, 129, 133,
437 132, 128, 128, 128, 128, 198, 131, 130,
438 130, 128, 128, 128, 128, 178, 130, 131,
439 131, 128, 128, 128, 128, 158, 132, 131,
440 142, 128, 127, 128, 128, 158, 135, 134,
441 133, 128, 128, 128, 128, 178, 131, 132,
442 131, 128, 128, 128, 128, 160, 132, 130,
443 128, 128, 128, 128, 128, 190, 131, 131,
444 170, 128, 128, 128, 128, 157, 142, 131,
445 149, 128, 128, 128, 128, 178, 142, 135,
446 145, 128, 128, 128, 129, 173, 132, 135,
447 129, 128, 128, 128, 128, 171, 134, 129,
448 140, 128, 128, 128, 128, 135, 132, 129});
449 lstmq.run();
450 validate(CLAccessor(output_state), expected_output);
451}
Manuel Bottini07263982019-10-17 18:37:26 +0100452TEST_SUITE_END() // MultSmallerEq1
453
454TEST_SUITE(MultGreater1)
// Same small scenario (batch 2, input 2, output 4, three time steps) but with a
// different weight quantization: scale 1/16 instead of 1/128. Per the enclosing
// suite name this presumably drives the effective internal multiplier above 1 —
// confirm against the layer's multiplier computation.
455TEST_CASE(RunSmall, framework::DatasetMode::PRECOMMIT)
456{
457 //Input sequence length is 1
458 const int batch_size = 2;
459 const int input_size = 2;
460 const int output_size = 4;
461
462 QuantizationInfo qasymm(1.f / 128.f, 128);
463 QuantizationInfo qweights(1.f / 16.f, 16);
464 QuantizationInfo qsymm_3(8.f / 32768.f, 0);
465 QuantizationInfo qsymm_4(16.f / 32768.f, 0);
466
467 TensorShape input_shape{ input_size, batch_size };
468 TensorShape input_weights_shape{ input_size, output_size };
469 TensorShape recurrent_weights_shape{ output_size, output_size };
470 TensorShape output_shape{ output_size, batch_size};
471 TensorShape bias_shape{ output_size };
472
473 auto input_to_input_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
474 auto input_to_forget_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
475 auto input_to_cell_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
476 auto input_to_output_weights = create_tensor<CLTensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
477 auto recurrent_to_input_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
478 auto recurrent_to_forget_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
479 auto recurrent_to_cell_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
480 auto recurrent_to_output_weights = create_tensor<CLTensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
481 auto input_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
482 auto forget_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
483 auto cell_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
484 auto output_gate_bias = create_tensor<CLTensor>(bias_shape, DataType::S32);
485
486 // LSTM input
487 auto input = create_tensor<CLTensor>(input_shape, DataType::QASYMM8, 1, qasymm);
488
489 // LSTM output state
490 auto output_state = create_tensor<CLTensor>(output_shape, DataType::QASYMM8, 1, qasymm);
491
492 // LSTM cell state
493 auto cell_state = create_tensor<CLTensor>(output_shape, DataType::QSYMM16, 1, qsymm_4);
494
495 CLLSTMLayerQuantized lstmq;
496
    // cell_state/output_state are passed as both previous and updated state so
    // the recurrent state persists across run() calls.
497 lstmq.configure(&input, &input_to_input_weights, &input_to_forget_weights, &input_to_cell_weights, &input_to_output_weights,
498 &recurrent_to_input_weights, &recurrent_to_forget_weights, &recurrent_to_cell_weights, &recurrent_to_output_weights,
499 &input_gate_bias, &forget_gate_bias, &cell_gate_bias, &output_gate_bias, &cell_state, &output_state, &cell_state, &output_state);
500
501 input.allocator()->allocate();
502 input_to_input_weights.allocator()->allocate();
503 input_to_forget_weights.allocator()->allocate();
504 input_to_cell_weights.allocator()->allocate();
505 input_to_output_weights.allocator()->allocate();
506 recurrent_to_input_weights.allocator()->allocate();
507 recurrent_to_forget_weights.allocator()->allocate();
508 recurrent_to_cell_weights.allocator()->allocate();
509 recurrent_to_output_weights.allocator()->allocate();
510 input_gate_bias.allocator()->allocate();
511 forget_gate_bias.allocator()->allocate();
512 cell_gate_bias.allocator()->allocate();
513 output_gate_bias.allocator()->allocate();
514 cell_state.allocator()->allocate();
515 output_state.allocator()->allocate();
516
517 // Fill weights and biases
518 fill_tensor(input_to_input_weights, std::vector<uint8_t>{ 122, 130,
519 124, 134,
520 120, 122,
521 134, 134 });
522
523 fill_tensor(input_to_forget_weights, std::vector<uint8_t> { 204, 193,
524 148, 59,
525 113, 17,
526 66, 197 });
527
528 fill_tensor(input_to_cell_weights, std::vector<uint8_t> { 172, 101,
529 184, 209,
530 165, 82,
531 108, 209 });
532
533 fill_tensor(input_to_output_weights, std::vector<uint8_t> { 203, 244,
534 219, 114,
535 130, 16,
536 163, 222 });
537
538 fill_tensor(recurrent_to_input_weights, std::vector<uint8_t> { 162, 168, 7, 95,
539 91, 155, 108, 216,
540 255, 100, 48, 188,
541 58, 37, 186, 147 });
542
543 fill_tensor(recurrent_to_forget_weights, std::vector<uint8_t> { 46, 58, 47, 170,
544 246, 96, 12, 99,
545 68, 23, 186, 161,
546 237, 164, 89, 6 });
547
548 fill_tensor(recurrent_to_cell_weights, std::vector<uint8_t> { 234, 99, 71, 206,
549 205, 159, 64, 253,
550 191, 148, 116, 8,
551 209, 136, 59, 138 });
552
553 fill_tensor(recurrent_to_output_weights, std::vector<uint8_t> { 23, 241, 137, 36,
554 206, 5, 227, 56,
555 254, 176, 231, 47,
556 18, 201, 161, 11 });
557
558 fill_tensor(input_gate_bias, std::vector<int> {-103038, 30525, 115255, -38154 });
559 fill_tensor(forget_gate_bias, std::vector<int> { -23428, 126970, 116806, 46307 });
560 fill_tensor(cell_gate_bias, std::vector<int> { 128006, 69949, -42808, 42568 });
561 fill_tensor(output_gate_bias, std::vector<int> { -67066, -53607, 47233, 7300 });
562
563 SimpleTensor<uint8_t> expected_output(output_shape, DataType::QASYMM8, 1, qasymm);
564
    // Initial state: output_state at the zero-point (128), cell_state at 0.
565 // Initialize state
566 fill_tensor(output_state, std::vector<uint8_t> { 128, 128, 128, 128,
567 128, 128, 128, 128 });
568 fill_tensor(cell_state, std::vector<int16_t> { 0, 0, 0, 0,
569 0, 0, 0, 0 });
570
571 // First input
572 fill_tensor(input, std::vector<uint8_t> { 106, 193,
573 155, 150 });
574
575 fill_tensor(expected_output, std::vector<uint8_t> { 128, 128, 31, 128,
576 128, 128, 31, 128 });
577
578 lstmq.run();
579 validate(CLAccessor(output_state), expected_output);
580
    // The input tensor is NOT refilled: steps 2 and 3 reuse the same input
    // values with the evolving recurrent state.
581 // Second input
582 fill_tensor(expected_output, std::vector<uint8_t> { 128, 128, 5, 128,
583 128, 128, 5, 128 });
584 lstmq.run();
585 validate(CLAccessor(output_state), expected_output);
586
587 // Third input
588 fill_tensor(expected_output, std::vector<uint8_t> { 128, 128, 1, 128,
589 128, 128, 1, 128, });
590 lstmq.run();
591 validate(CLAccessor(output_state), expected_output);
592}
593TEST_SUITE_END() // MultGreater1
594TEST_SUITE_END() // IntegrationTestCase
Manuel Bottini10c53f12019-07-17 16:11:53 +0100595// clang-format on
596// *INDENT-ON*
597
598TEST_SUITE_END() // LSTMLayerQuantized
Manuel Bottini07263982019-10-17 18:37:26 +0100599TEST_SUITE_END() // CL
Manuel Bottini10c53f12019-07-17 16:11:53 +0100600} // namespace validation
601} // namespace test
602} // namespace arm_compute