/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h"

#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/Utils.h"
#include "tests/datasets/LSTMLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"

#include <vector>

namespace arm_compute
{
namespace test
{
namespace validation
{
namespace
{
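// Helpers to copy a flat vector of values (given in row-major, x-fastest order)
// into a runtime tensor or a reference tensor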
template <typename T>
inline void fill_tensor(Tensor &tensor, const std::vector<T> &v)
{
    // Fill the tensor through a window so that each value lands in the right
    // element even when the tensor has been allocated with padding
    TensorShape t_shape = tensor.info()->tensor_shape();
    Window      window;
    window.use_tensor_dimensions(t_shape);
    Iterator out(&tensor, window);
    execute_window_loop(window, [&](const Coordinates & id)
    {
        *reinterpret_cast<T *>(out.ptr()) = v[coord2index(t_shape, id)];
    },
    out);
}

template <typename T>
inline void fill_tensor(SimpleTensor<T> &tensor, const std::vector<T> &v)
{
    // Reference tensors are densely packed, so a plain copy suffices
    std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
}

/** Tolerance for the quantized output: bit-exact on AArch64, off by at most one quantized step elsewhere */
#if defined(__aarch64__)
constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(0);
#else  // defined(__aarch64__)
constexpr AbsoluteTolerance<int16_t> tolerance_qsymm16(1);
#endif // defined(__aarch64__)

} // namespace

TEST_SUITE(NEON)
TEST_SUITE(LSTMLayerQuantized)

// *INDENT-OFF*
// clang-format off
TEST_CASE(IntegrationTestCaseSmall, framework::DatasetMode::PRECOMMIT)
{
    const int batch_size  = 2;
    const int input_size  = 2;
    const int output_size = 4;

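    // Input/output and weights are QASYMM8 with scale 1/128 and zero point 128;
    // the cell state is QSYMM16 with scale 16/32768 (qsymm_4). qsymm_3 is
    // declared but not used by this test; presumably it mirrors an intermediate
    // quantization used inside NELSTMLayerQuantized.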
    QuantizationInfo qasymm(1.f / 128.f, 128);
    QuantizationInfo qweights(1.f / 128.f, 128);
    QuantizationInfo qsymm_3(8.f / 32768.f, 0);
    QuantizationInfo qsymm_4(16.f / 32768.f, 0);

    TensorShape input_shape{ input_size, batch_size };
    TensorShape input_weights_shape{ input_size, output_size };
    TensorShape recurrent_weights_shape{ output_size, output_size };
    TensorShape output_shape{ output_size, batch_size };
    TensorShape bias_shape{ output_size };

    auto input_to_input_weights      = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
    auto input_to_forget_weights     = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
    auto input_to_cell_weights       = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
    auto input_to_output_weights     = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
    auto recurrent_to_input_weights  = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
    auto recurrent_to_forget_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
    auto recurrent_to_cell_weights   = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
    auto recurrent_to_output_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
    auto input_gate_bias             = create_tensor<Tensor>(bias_shape, DataType::S32);
    auto forget_gate_bias            = create_tensor<Tensor>(bias_shape, DataType::S32);
    auto cell_gate_bias              = create_tensor<Tensor>(bias_shape, DataType::S32);
    auto output_gate_bias            = create_tensor<Tensor>(bias_shape, DataType::S32);

    // LSTM input
    auto input = create_tensor<Tensor>(input_shape, DataType::QASYMM8, 1, qasymm);

    // LSTM output state
    auto output_state = create_tensor<Tensor>(output_shape, DataType::QASYMM8, 1, qasymm);

    // LSTM cell state
    auto cell_state = create_tensor<Tensor>(output_shape, DataType::QSYMM16, 1, qsymm_4);

    NELSTMLayerQuantized lstmq;

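    // cell_state and output_state are passed both as previous-state inputs and
    // as outputs, so each run() updates the state in place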
    lstmq.configure(&input, &input_to_input_weights, &input_to_forget_weights, &input_to_cell_weights, &input_to_output_weights,
                    &recurrent_to_input_weights, &recurrent_to_forget_weights, &recurrent_to_cell_weights, &recurrent_to_output_weights,
                    &input_gate_bias, &forget_gate_bias, &cell_gate_bias, &output_gate_bias, &cell_state, &output_state, &cell_state, &output_state);

    input.allocator()->allocate();
    input_to_input_weights.allocator()->allocate();
    input_to_forget_weights.allocator()->allocate();
    input_to_cell_weights.allocator()->allocate();
    input_to_output_weights.allocator()->allocate();
    recurrent_to_input_weights.allocator()->allocate();
    recurrent_to_forget_weights.allocator()->allocate();
    recurrent_to_cell_weights.allocator()->allocate();
    recurrent_to_output_weights.allocator()->allocate();
    input_gate_bias.allocator()->allocate();
    forget_gate_bias.allocator()->allocate();
    cell_gate_bias.allocator()->allocate();
    output_gate_bias.allocator()->allocate();
    cell_state.allocator()->allocate();
    output_state.allocator()->allocate();

    // Fill weights and biases
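    // (values are listed one row per line, the x-fastest order consumed by fill_tensor)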
    fill_tensor(input_to_input_weights, std::vector<uint8_t>{  47, 168,
                                                               66, 239,
                                                                6,  42,
                                                              237, 236 });

    fill_tensor(input_to_forget_weights, std::vector<uint8_t> { 204, 193,
                                                                148,  59,
                                                                113,  17,
                                                                 66, 197 });

    fill_tensor(input_to_cell_weights, std::vector<uint8_t> { 172, 101,
                                                              184, 209,
                                                              165,  82,
                                                              108, 209 });

    fill_tensor(input_to_output_weights, std::vector<uint8_t> { 203, 244,
                                                                219, 114,
                                                                130,  16,
                                                                163, 222 });

    fill_tensor(recurrent_to_input_weights, std::vector<uint8_t> { 162, 168,   7,  95,
                                                                    91, 155, 108, 216,
                                                                   255, 100,  48, 188,
                                                                    58,  37, 186, 147 });

    fill_tensor(recurrent_to_forget_weights, std::vector<uint8_t> {  46,  58,  47, 170,
                                                                    246,  96,  12,  99,
                                                                     68,  23, 186, 161,
                                                                    237, 164,  89,   6 });

    fill_tensor(recurrent_to_cell_weights, std::vector<uint8_t> { 234,  99,  71, 206,
                                                                  205, 159,  64, 253,
                                                                  191, 148, 116,   8,
                                                                  209, 136,  59, 138 });

    fill_tensor(recurrent_to_output_weights, std::vector<uint8_t> {  23, 241, 137,  36,
                                                                    206,   5, 227,  56,
                                                                    254, 176, 231,  47,
                                                                     18, 201, 161,  11 });

    fill_tensor(input_gate_bias,  std::vector<int> { -103038,  30525, 115255, -38154 });
    fill_tensor(forget_gate_bias, std::vector<int> {  -23428, 126970, 116806,  46307 });
    fill_tensor(cell_gate_bias,   std::vector<int> {  128006,  69949, -42808,  42568 });
    fill_tensor(output_gate_bias, std::vector<int> {  -67066, -53607,  47233,   7300 });

    SimpleTensor<uint8_t> expected_output(output_shape, DataType::QASYMM8, 1, qasymm);

    // Initialize state
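    // output_state is filled with the QASYMM8 zero point (128 represents 0.f);
    // cell_state is zero-initialised (QSYMM16 has a zero point of 0)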
    fill_tensor(output_state, std::vector<uint8_t> { 128, 128, 128, 128,
                                                     128, 128, 128, 128 });
    fill_tensor(cell_state, std::vector<int16_t> { 0, 0, 0, 0,
                                                   0, 0, 0, 0 });

    // First input
    fill_tensor(input, std::vector<uint8_t> { 106, 193,
                                              155, 150 });

    fill_tensor(expected_output, std::vector<uint8_t> { 128, 130, 36, 134,
                                                        128, 131, 35, 133 });

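    // Each run() consumes `input` and the current state, then writes the new
    // state back into output_state / cell_state; the same input values are
    // presented again at the second and third steps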
    lstmq.run();
    validate(Accessor(output_state), expected_output, tolerance_qsymm16);

    // Second input
    fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 12, 137,
                                                        128, 131, 10, 136 });
    lstmq.run();
    validate(Accessor(output_state), expected_output, tolerance_qsymm16);

    // Third input
    fill_tensor(expected_output, std::vector<uint8_t> { 128, 129,  8, 140,
                                                        128, 130,  6, 138 });
    lstmq.run();
    validate(Accessor(output_state), expected_output, tolerance_qsymm16);
}

TEST_CASE(IntegrationTestCaseLarge, framework::DatasetMode::PRECOMMIT)
{
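    // Same structure as IntegrationTestCaseSmall, scaled up to batch_size = 16
    // and input/output size 8; two time steps are validated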
    const int batch_size  = 16;
    const int input_size  = 8;
    const int output_size = 8;

    QuantizationInfo qasymm(1.f / 128.f, 128);
    QuantizationInfo qweights(1.f / 128.f, 128);
    QuantizationInfo qsymm_3(8.f / 32768.f, 0);
    QuantizationInfo qsymm_4(16.f / 32768.f, 0);

    TensorShape input_shape{ input_size, batch_size };
    TensorShape input_weights_shape{ input_size, output_size };
    TensorShape recurrent_weights_shape{ output_size, output_size };
    TensorShape output_shape{ output_size, batch_size };
    TensorShape bias_shape{ output_size };

    auto input_to_input_weights      = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
    auto input_to_forget_weights     = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
    auto input_to_cell_weights       = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
    auto input_to_output_weights     = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
    auto recurrent_to_input_weights  = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
    auto recurrent_to_forget_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
    auto recurrent_to_cell_weights   = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
    auto recurrent_to_output_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
    auto input_gate_bias             = create_tensor<Tensor>(bias_shape, DataType::S32);
    auto forget_gate_bias            = create_tensor<Tensor>(bias_shape, DataType::S32);
    auto cell_gate_bias              = create_tensor<Tensor>(bias_shape, DataType::S32);
    auto output_gate_bias            = create_tensor<Tensor>(bias_shape, DataType::S32);

    // LSTM input
    auto input = create_tensor<Tensor>(input_shape, DataType::QASYMM8, 1, qasymm);

    // LSTM output state
    auto output_state = create_tensor<Tensor>(output_shape, DataType::QASYMM8, 1, qasymm);

    // LSTM cell state
    auto cell_state = create_tensor<Tensor>(output_shape, DataType::QSYMM16, 1, qsymm_4);

    NELSTMLayerQuantized lstmq;

    lstmq.configure(&input, &input_to_input_weights, &input_to_forget_weights, &input_to_cell_weights, &input_to_output_weights,
                    &recurrent_to_input_weights, &recurrent_to_forget_weights, &recurrent_to_cell_weights, &recurrent_to_output_weights,
                    &input_gate_bias, &forget_gate_bias, &cell_gate_bias, &output_gate_bias, &cell_state, &output_state, &cell_state, &output_state);

    input.allocator()->allocate();
    input_to_input_weights.allocator()->allocate();
    input_to_forget_weights.allocator()->allocate();
    input_to_cell_weights.allocator()->allocate();
    input_to_output_weights.allocator()->allocate();
    recurrent_to_input_weights.allocator()->allocate();
    recurrent_to_forget_weights.allocator()->allocate();
    recurrent_to_cell_weights.allocator()->allocate();
    recurrent_to_output_weights.allocator()->allocate();
    input_gate_bias.allocator()->allocate();
    forget_gate_bias.allocator()->allocate();
    cell_gate_bias.allocator()->allocate();
    output_gate_bias.allocator()->allocate();
    cell_state.allocator()->allocate();
    output_state.allocator()->allocate();

    // Fill weights and biases
    fill_tensor(input_to_input_weights, std::vector<uint8_t>{ 141,  89, 200, 180,  46,  50,  87, 128,
                                                              149, 227, 177, 187, 212, 229,  54, 111,
                                                              131, 116,   3,  58, 196,  26, 131, 255,
                                                               22, 106, 216,  69, 239,  12, 232, 207,
                                                              184,  56, 236, 172,  28, 143, 161, 124,
                                                              255,  33, 197, 122,  47, 197,  26, 229,
                                                               91,  79,  11, 160,  26,  80, 100,  36,
                                                              248, 186,  97,  61, 125,  46,  14, 100 });

    fill_tensor(input_to_forget_weights, std::vector<uint8_t> { 237, 165, 141, 249,  72, 116,  36, 115,
                                                                234, 213,  85,  84,  59,  62, 150, 246,
                                                                182, 102, 158, 214, 182, 183,  94,  11,
                                                                158, 192,  92, 189, 160, 219, 206, 249,
                                                                 88, 213, 193, 244, 151,  72, 129,  49,
                                                                239,  83, 106,   9, 169, 187, 125, 171,
                                                                 32, 141, 126,  92,  13,  36, 224, 150,
                                                                187, 250, 178, 169,  89, 214,  91, 173 });

    fill_tensor(input_to_cell_weights, std::vector<uint8_t> {  93, 103, 226, 139, 185, 252, 129, 171,
                                                              159,  32,  25, 175, 224, 183, 165,  35,
                                                              207,  69, 238, 228, 149, 214,  79,   6,
                                                                5,  66, 102,  14,  19, 111,  36, 143,
                                                               22,  85,  13,  78, 236, 121, 122,  77,
                                                              249,  39,  88,  12, 205, 143,  93, 240,
                                                              167,  89, 188,  50,  73,  69, 201, 251,
                                                               59,  32, 203, 184, 139, 191, 199,  74 });

    fill_tensor(input_to_output_weights, std::vector<uint8_t> { 205,   7,  95, 104, 252, 143, 226,  73,
                                                                229, 114, 152, 171, 221, 153,  73, 229,
                                                                153, 165, 223, 239, 100,  38, 172, 211,
                                                                226, 133, 239, 207, 116, 230, 170, 100,
                                                                241,  95, 171, 124,  63, 115,  32, 127,
                                                                141, 239,  53, 193, 201,  53, 104, 178,
                                                                186, 212, 167, 107, 226, 230,  71, 213,
                                                                148, 217,  19, 248, 233, 195, 183, 156 });

    fill_tensor(recurrent_to_input_weights, std::vector<uint8_t> { 147, 112, 140, 103,   3, 255,  17,  49,
                                                                    84, 112, 144, 213, 138, 142, 112,  66,
                                                                   117,  30, 101,  35,  25, 132, 211, 229,
                                                                   183, 208, 102,  16,  38,  85, 101, 152,
                                                                   226,  83, 132,  22, 161, 110, 157, 129,
                                                                   184,  63, 168,  42, 220, 126, 209, 157,
                                                                     5,  88, 243,  83, 249,  19, 226, 209,
                                                                   173,  96, 185,  77, 146, 227, 238, 136 });

    fill_tensor(recurrent_to_forget_weights, std::vector<uint8_t> {  52, 132,  92, 200, 213,  32, 213,  37,
                                                                    116, 142, 116, 180,   4, 172, 158, 143,
                                                                    110,  40,  99,  28, 221, 153, 133,   2,
                                                                    247, 144, 198, 100,  20,  15, 221, 196,
                                                                    159, 178, 188, 151, 171,  15,  25, 217,
                                                                    178, 109, 110, 118, 128,  39, 232, 234,
                                                                    184, 214, 177,  13,  56,   6,  28, 252,
                                                                     89, 187, 242,  59, 146, 111, 132, 129 });

    fill_tensor(recurrent_to_cell_weights, std::vector<uint8_t> {  70,  44, 137,  29,  36, 127,   1, 241,
                                                                   26, 241, 142, 114,  67, 181,  49,  57,
                                                                  131, 152, 175,  77,  23,  63,  37, 124,
                                                                  150, 113,  95, 103, 110, 201,  69,  97,
                                                                  196, 242,  62, 214,  66,  19,  45, 135,
                                                                   22, 168, 149, 104,  77, 101,  36,  68,
                                                                  170, 116, 222, 100, 109,   1, 154,  18,
                                                                  133, 215, 105,  93,  31,  57, 231, 112 });

    fill_tensor(recurrent_to_output_weights, std::vector<uint8_t> {  45, 181, 220, 219,  49,  63,  49, 129,
                                                                      7, 166, 104, 114,  83,  40,   1, 195,
                                                                    245, 142,  82, 232, 104, 245,  82, 196,
                                                                    111,  56, 156,   9, 141, 240, 180, 148,
                                                                    247, 198, 234, 137,  13, 210, 161, 192,
                                                                    196,  59, 233, 184, 142, 187, 140, 166,
                                                                      2,  95, 152,  46,  71,  46, 113,  32,
                                                                    175, 229,  86,  87,  62,  93,  74, 130 });

    fill_tensor(input_gate_bias,  std::vector<int> {  -40040, -106916,  -92315,  -79123,   45160,  -17954,   50962,  -63758 });
    fill_tensor(forget_gate_bias, std::vector<int> { -128514,    8463,  -57831,  116977,  106547,  -28132, -124557,   44941 });
    fill_tensor(cell_gate_bias,   std::vector<int> {   88388,  123601, -116148,  -13022,   21619,   48926,   57523,   39332 });
    fill_tensor(output_gate_bias, std::vector<int> {   59485,  -33070,   21386, -100633, -115959,  125768,  -56407,   24897 });

    SimpleTensor<uint8_t> expected_output(output_shape, DataType::QASYMM8, 1, qasymm);

    // Initialize state
    fill_tensor(output_state, std::vector<uint8_t> { 128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128,
                                                     128, 128, 128, 128, 128, 128, 128, 128 });

    fill_tensor(cell_state, std::vector<int16_t> { 0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0,
                                                   0, 0, 0, 0, 0, 0, 0, 0 });

    // First input
    fill_tensor(input, std::vector<uint8_t> { 247, 203, 159, 131, 182, 114, 207, 195,
                                               48,  61, 154,  16,  80, 101, 116, 255,
                                               50, 115,  45, 186,  75, 212,  98,  48,
                                               88, 146,  24, 143, 218, 174, 203, 200,
                                              239,  16,  66, 136, 234,  54,  94,  51,
                                              101, 128, 220, 213, 164,  82, 137, 255,
                                               70, 165, 234, 220,  66,  35, 183, 206,
                                               39,  57, 180, 202,  23, 172, 224, 109,
                                              102, 215, 186,  82, 215, 147,  85, 187,
                                               96, 249,  59, 116, 150,  44, 167, 128,
                                               34, 217, 148, 193, 243,  38, 250, 208,
                                              112, 130, 208,  29,  16, 122,  20,  92,
                                               24,  72, 104,  29, 150, 233, 151,  19,
                                              158, 192, 254,  70,  73, 142, 106, 152,
                                                3,  61,  24, 135, 212,   9,  80, 234,
                                              147, 246,  83, 249,  49,  14,  68,  50 });

    fill_tensor(expected_output, std::vector<uint8_t> { 131, 128, 128, 128, 128, 180, 129, 133,
                                                        136, 128, 126, 128, 128, 173, 135, 130,
                                                        160, 128, 128, 128, 128, 138, 132, 129,
                                                        131, 128, 127, 128, 128, 169, 129, 131,
                                                        133, 128, 128, 128, 128, 182, 130, 129,
                                                        131, 128, 128, 128, 128, 163, 129, 130,
                                                        131, 128, 128, 128, 128, 149, 132, 129,
                                                        143, 128, 127, 128, 128, 150, 134, 131,
                                                        134, 128, 128, 128, 128, 167, 130, 130,
                                                        131, 128, 128, 128, 128, 152, 132, 129,
                                                        128, 128, 128, 128, 128, 169, 130, 130,
                                                        173, 128, 128, 128, 128, 148, 139, 130,
                                                        152, 128, 128, 128, 128, 168, 139, 132,
                                                        147, 128, 128, 128, 128, 161, 131, 132,
                                                        130, 128, 128, 128, 128, 159, 134, 128,
                                                        140, 128, 128, 128, 128, 133, 132, 128 });

    lstmq.run();
    validate(Accessor(output_state), expected_output, tolerance_qsymm16);

    // Second input
    fill_tensor(expected_output, std::vector<uint8_t> { 130, 128, 128, 128, 128, 205, 129, 137,
                                                        135, 128, 127, 128, 128, 190, 137, 132,
                                                        160, 128, 128, 128, 128, 142, 133, 131,
                                                        130, 128, 128, 128, 128, 185, 129, 133,
                                                        132, 128, 128, 128, 128, 198, 131, 130,
                                                        130, 128, 128, 128, 128, 178, 130, 131,
                                                        131, 128, 128, 128, 128, 158, 132, 131,
                                                        142, 128, 127, 128, 128, 158, 135, 134,
                                                        133, 128, 128, 128, 128, 178, 131, 132,
                                                        131, 128, 128, 128, 128, 160, 132, 130,
                                                        128, 128, 128, 128, 128, 190, 131, 131,
                                                        170, 128, 128, 128, 128, 157, 142, 131,
                                                        149, 128, 128, 128, 128, 178, 142, 135,
                                                        145, 128, 128, 128, 129, 173, 132, 135,
                                                        129, 128, 128, 128, 128, 171, 134, 129,
                                                        140, 128, 128, 128, 128, 135, 132, 129 });
    lstmq.run();
    validate(Accessor(output_state), expected_output, tolerance_qsymm16);
}
// clang-format on
// *INDENT-ON*

TEST_SUITE_END() // LSTMLayerQuantized
TEST_SUITE_END() // NEON
} // namespace validation
} // namespace test
} // namespace arm_compute