/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NELSTMLayer.h"
#include "arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h"
#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/Utils.h"
#include "tests/datasets/LSTMLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"

#include <algorithm>
#include <cstring>
#include <vector>
36
37namespace arm_compute
38{
39namespace test
40{
41namespace validation
42{
43namespace
44{
45template <typename T>
46inline void fill_tensor(Tensor &tensor, const std::vector<T> &v)
47{
48 // Import memory accounting for padding
49 TensorShape t_shape = tensor.info()->tensor_shape();
50 Window window;
51 window.use_tensor_dimensions(t_shape);
52 Iterator out(&tensor, window);
53 execute_window_loop(window, [&](const Coordinates & id)
54 {
55 *reinterpret_cast<T *>(out.ptr()) = v[coord2index(t_shape, id)];
56 },
57 out);
58}
59
60template <typename T>
61inline void fill_tensor(SimpleTensor<T> &tensor, const std::vector<T> &v)
62{
63 std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
64}
65
66} // namespace
67
68TEST_SUITE(NEON)
69TEST_SUITE(LSTMLayerQuantized)
70
71// *INDENT-OFF*
72// clang-format off
73TEST_CASE(IntegrationTestCaseSmall, framework::DatasetMode::PRECOMMIT)
74{
75 const int batch_size = 2;
76 const int input_size = 2;
77 const int output_size = 4;
78
79
80 QuantizationInfo qasymm(1.f / 128.f, 128);
81 QuantizationInfo qweights(1.f / 128.f, 128);
82 QuantizationInfo qsymm_3(8.f / 32768.f, 0);
83 QuantizationInfo qsymm_4(16.f / 32768.f, 0);
84
85 TensorShape input_shape{ input_size, batch_size };
86 TensorShape input_weights_shape{ input_size, output_size };
87 TensorShape recurrent_weights_shape{ output_size, output_size };
88 TensorShape output_shape{ output_size, batch_size};
89 TensorShape bias_shape{ output_size };
90
91 auto input_to_input_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
92 auto input_to_forget_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
93 auto input_to_cell_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
94 auto input_to_output_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
95 auto recurrent_to_input_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
96 auto recurrent_to_forget_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
97 auto recurrent_to_cell_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
98 auto recurrent_to_output_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
99 auto input_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
100 auto forget_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
101 auto cell_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
102 auto output_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
103
104 // LSTM input
105 auto input = create_tensor<Tensor>(input_shape, DataType::QASYMM8, 1, qasymm);
106
107 // LSTM output state
108 auto output_state = create_tensor<Tensor>(output_shape, DataType::QASYMM8, 1, qasymm);
109
110 // LSTM cell state
111 auto cell_state = create_tensor<Tensor>(output_shape, DataType::QSYMM16, 1, qsymm_4);
112
113 NELSTMLayerQuantized lstmq;
114
115 lstmq.configure(&input, &input_to_input_weights, &input_to_forget_weights, &input_to_cell_weights, &input_to_output_weights,
116 &recurrent_to_input_weights, &recurrent_to_forget_weights, &recurrent_to_cell_weights, &recurrent_to_output_weights,
117 &input_gate_bias, &forget_gate_bias, &cell_gate_bias, &output_gate_bias, &cell_state, &output_state, &cell_state, &output_state);
118
119 input.allocator()->allocate();
120 input_to_input_weights.allocator()->allocate();
121 input_to_forget_weights.allocator()->allocate();
122 input_to_cell_weights.allocator()->allocate();
123 input_to_output_weights.allocator()->allocate();
124 recurrent_to_input_weights.allocator()->allocate();
125 recurrent_to_forget_weights.allocator()->allocate();
126 recurrent_to_cell_weights.allocator()->allocate();
127 recurrent_to_output_weights.allocator()->allocate();
128 input_gate_bias.allocator()->allocate();
129 forget_gate_bias.allocator()->allocate();
130 cell_gate_bias.allocator()->allocate();
131 output_gate_bias.allocator()->allocate();
132 cell_state.allocator()->allocate();
133 output_state.allocator()->allocate();
134 cell_state.allocator()->allocate();
135 output_state.allocator()->allocate();
136
137 // Fill weights and biases
138 fill_tensor(input_to_input_weights, std::vector<uint8_t>{ 47, 168,
139 66, 239,
140 6, 42,
141 237, 236 });
142
143 fill_tensor(input_to_forget_weights, std::vector<uint8_t> { 204, 193,
144 148, 59,
145 113, 17,
146 66, 197 });
147
148 fill_tensor(input_to_cell_weights, std::vector<uint8_t> { 172, 101,
149 184, 209,
150 165, 82,
151 108, 209 });
152
153 fill_tensor(input_to_output_weights, std::vector<uint8_t> { 203, 244,
154 219, 114,
155 130, 16,
156 163, 222 });
157
158 fill_tensor(recurrent_to_input_weights, std::vector<uint8_t> { 162, 168, 7, 95,
159 91, 155, 108, 216,
160 255, 100, 48, 188,
161 58, 37, 186, 147 });
162
163 fill_tensor(recurrent_to_forget_weights, std::vector<uint8_t> { 46, 58, 47, 170,
164 246, 96, 12, 99,
165 68, 23, 186, 161,
166 237, 164, 89, 6 });
167
168 fill_tensor(recurrent_to_cell_weights, std::vector<uint8_t> { 234, 99, 71, 206,
169 205, 159, 64, 253,
170 191, 148, 116, 8,
171 209, 136, 59, 138 });
172
173 fill_tensor(recurrent_to_output_weights, std::vector<uint8_t> { 23, 241, 137, 36,
174 206, 5, 227, 56,
175 254, 176, 231, 47,
176 18, 201, 161, 11 });
177
178 fill_tensor(input_gate_bias, std::vector<int> {-103038, 30525, 115255, -38154 });
179 fill_tensor(forget_gate_bias, std::vector<int> { -23428, 126970, 116806, 46307 });
180 fill_tensor(cell_gate_bias, std::vector<int> { 128006, 69949, -42808, 42568 });
181 fill_tensor(output_gate_bias, std::vector<int> { -67066, -53607, 47233, 7300 });
182
183 SimpleTensor<uint8_t> expected_output(output_shape, DataType::QASYMM8, 1, qasymm);
184
185 // Initialize state
186 fill_tensor(output_state, std::vector<uint8_t> { 128, 128, 128, 128,
187 128, 128, 128, 128 });
188 fill_tensor(cell_state, std::vector<int16_t> { 0, 0, 0, 0,
189 0, 0, 0, 0 });
190
191 // First input
192 fill_tensor(input, std::vector<uint8_t> { 106, 193,
193 155, 150 });
194
195 fill_tensor(expected_output, std::vector<uint8_t> { 128, 130, 36, 134,
196 128, 131, 35, 133 });
197
198 lstmq.run();
199 validate(Accessor(output_state), expected_output);
200
201 // Second input
202 fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 12, 137,
203 128, 131, 10, 136 });
204 lstmq.run();
205 validate(Accessor(output_state), expected_output);
206
207 // Third input
208 fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 8, 140,
209 128, 130, 6, 138 });
210 lstmq.run();
211 validate(Accessor(output_state), expected_output);
212}
213
214TEST_CASE(IntegrationTestCaseLarge, framework::DatasetMode::PRECOMMIT)
215{
216 const int batch_size = 16;
217 const int input_size = 8;
218 const int output_size = 8;
219
220
221 QuantizationInfo qasymm(1.f / 128.f, 128);
222 QuantizationInfo qweights(1.f / 128.f, 128);
223 QuantizationInfo qsymm_3(8.f / 32768.f, 0);
224 QuantizationInfo qsymm_4(16.f / 32768.f, 0);
225
226 TensorShape input_shape{ input_size, batch_size };
227 TensorShape input_weights_shape{ input_size, output_size };
228 TensorShape recurrent_weights_shape{ output_size, output_size };
229 TensorShape output_shape{ output_size, batch_size};
230 TensorShape bias_shape{ output_size };
231
232 auto input_to_input_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
233 auto input_to_forget_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
234 auto input_to_cell_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
235 auto input_to_output_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
236 auto recurrent_to_input_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
237 auto recurrent_to_forget_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
238 auto recurrent_to_cell_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
239 auto recurrent_to_output_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
240 auto input_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
241 auto forget_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
242 auto cell_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
243 auto output_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
244
245 // LSTM input
246 auto input = create_tensor<Tensor>(input_shape, DataType::QASYMM8, 1, qasymm);
247
248 // LSTM output state
249 auto output_state = create_tensor<Tensor>(output_shape, DataType::QASYMM8, 1, qasymm);
250
251 // LSTM cell state
252 auto cell_state = create_tensor<Tensor>(output_shape, DataType::QSYMM16, 1, qsymm_4);
253
254 NELSTMLayerQuantized lstmq;
255
256 lstmq.configure(&input, &input_to_input_weights, &input_to_forget_weights, &input_to_cell_weights, &input_to_output_weights,
257 &recurrent_to_input_weights, &recurrent_to_forget_weights, &recurrent_to_cell_weights, &recurrent_to_output_weights,
258 &input_gate_bias, &forget_gate_bias, &cell_gate_bias, &output_gate_bias, &cell_state, &output_state, &cell_state, &output_state);
259
260 input.allocator()->allocate();
261 input_to_input_weights.allocator()->allocate();
262 input_to_forget_weights.allocator()->allocate();
263 input_to_cell_weights.allocator()->allocate();
264 input_to_output_weights.allocator()->allocate();
265 recurrent_to_input_weights.allocator()->allocate();
266 recurrent_to_forget_weights.allocator()->allocate();
267 recurrent_to_cell_weights.allocator()->allocate();
268 recurrent_to_output_weights.allocator()->allocate();
269 input_gate_bias.allocator()->allocate();
270 forget_gate_bias.allocator()->allocate();
271 cell_gate_bias.allocator()->allocate();
272 output_gate_bias.allocator()->allocate();
273 cell_state.allocator()->allocate();
274 output_state.allocator()->allocate();
275
276 // Fill weights and biases
277 fill_tensor(input_to_input_weights, std::vector<uint8_t>{ 141, 89, 200, 180, 46, 50, 87, 128,
278 149, 227, 177, 187, 212, 229, 54, 111,
279 131, 116, 3, 58, 196, 26, 131, 255,
280 22, 106, 216, 69, 239, 12, 232, 207,
281 184, 56, 236, 172, 28, 143, 161, 124,
282 255, 33, 197, 122, 47, 197, 26, 229,
283 91, 79, 11, 160, 26, 80, 100, 36,
284 248, 186, 97, 61, 125, 46, 14, 100, });
285
286 fill_tensor(input_to_forget_weights, std::vector<uint8_t> { 237, 165, 141, 249, 72, 116, 36 , 115,
287 234, 213, 85, 84, 59, 62, 150, 246,
288 182, 102, 158, 214, 182, 183, 94, 11,
289 158, 192, 92, 189, 160, 219, 206, 249,
290 88, 213, 193, 244, 151, 72, 129, 49,
291 239, 83, 106, 9, 169, 187, 125, 171,
292 32, 141, 126, 92, 13, 36, 224, 150,
293 187, 250, 178, 169, 89, 214, 91, 173 });
294
295 fill_tensor(input_to_cell_weights, std::vector<uint8_t> { 93, 103, 226, 139, 185, 252, 129, 171,
296 159, 32, 25, 175, 224, 183, 165, 35,
297 207, 69, 238, 228, 149, 214, 79, 6,
298 5, 66, 102, 14, 19, 111, 36, 143,
299 22, 85, 13, 78, 236, 121, 122, 77,
300 249, 39, 88, 12, 205, 143, 93, 240,
301 167, 89, 188, 50, 73, 69, 201, 251,
302 59, 32, 203, 184, 139, 191, 199, 74});
303
304 fill_tensor(input_to_output_weights, std::vector<uint8_t> { 205, 7, 95, 104, 252, 143, 226, 73,
305 229, 114, 152, 171, 221, 153, 73, 229,
306 153, 165, 223, 239, 100, 38, 172, 211,
307 226, 133, 239, 207, 116, 230, 170, 100,
308 241, 95, 171, 124, 63, 115, 32, 127,
309 141, 239, 53, 193, 201, 53, 104, 178,
310 186, 212, 167, 107, 226, 230, 71, 213,
311 148, 217, 19, 248, 233, 195, 183, 156 });
312
313 fill_tensor(recurrent_to_input_weights, std::vector<uint8_t> { 147, 112, 140, 103, 3, 255, 17, 49,
314 84, 112, 144, 213, 138, 142, 112, 66,
315 117, 30, 101, 35, 25, 132, 211, 229,
316 183, 208, 102, 16, 38, 85, 101, 152,
317 226, 83, 132, 22, 161, 110, 157, 129,
318 184, 63, 168, 42, 220, 126, 209, 157,
319 5, 88, 243, 83, 249, 19, 226, 209,
320 173, 96, 185, 77, 146, 227, 238, 136 });
321
322
323 fill_tensor(recurrent_to_forget_weights, std::vector<uint8_t> { 52, 132, 92, 200, 213, 32, 213, 37,
324 116, 142, 116, 180, 4, 172, 158, 143,
325 110, 40, 99, 28, 221, 153, 133, 2,
326 247, 144, 198, 100, 20, 15, 221, 196,
327 159, 178, 188, 151, 171, 15, 25, 217,
328 178, 109, 110, 118, 128, 39, 232, 234,
329 184, 214, 177, 13, 56, 6, 28, 252,
330 89, 187, 242, 59, 146, 111, 132, 129});
331
332 fill_tensor(recurrent_to_cell_weights, std::vector<uint8_t> { 70, 44, 137, 29, 36, 127, 1, 241,
333 26, 241, 142, 114, 67, 181, 49, 57,
334 131, 152, 175, 77, 23, 63, 37, 124,
335 150, 113, 95, 103, 110, 201, 69, 97,
336 196, 242, 62, 214, 66, 19, 45, 135,
337 22, 168, 149, 104, 77, 101, 36, 68,
338 170, 116, 222, 100, 109, 1, 154, 18,
339 133, 215, 105, 93, 31, 57, 231, 112 });
340
341
342 fill_tensor(recurrent_to_output_weights, std::vector<uint8_t> { 45 , 181 , 220 , 219 , 49 , 63 , 49 , 129,
343 7 , 166 , 104 , 114 , 83 , 40 , 1 , 195,
344 245 , 142 , 82 , 232 , 104 , 245 , 82 , 196,
345 111 , 56 , 156 , 9 , 141 , 240 , 180 , 148,
346 247 , 198 , 234 , 137 , 13 , 210 , 161 , 192,
347 196 , 59 , 233 , 184 , 142 , 187 , 140 , 166,
348 2 , 95 , 152 , 46 , 71 , 46 , 113 , 32,
349 175 , 229 , 86 , 87 , 62 , 93 , 74 , 130});
350
351 fill_tensor(input_gate_bias, std::vector<int> { -40040, -106916, -92315, -79123, 45160, -17954, 50962, -63758 });
352 fill_tensor(forget_gate_bias, std::vector<int> { -128514, 8463, -57831, 116977, 106547, -28132, -124557, 44941 });
353 fill_tensor(cell_gate_bias, std::vector<int> { 88388 , 123601, -116148, -13022, 21619, 48926, 57523, 39332 });
354 fill_tensor(output_gate_bias, std::vector<int> { 59485 , -33070, 21386, -100633, -115959, 125768, -56407, 24897 });
355
356 SimpleTensor<uint8_t> expected_output(output_shape, DataType::QASYMM8, 1, qasymm);
357
358 // Initialize state
359 fill_tensor(output_state, std::vector<uint8_t> { 128, 128, 128, 128, 128, 128, 128, 128,
360 128, 128, 128, 128, 128, 128, 128, 128,
361 128, 128, 128, 128, 128, 128, 128, 128,
362 128, 128, 128, 128, 128, 128, 128, 128,
363 128, 128, 128, 128, 128, 128, 128, 128,
364 128, 128, 128, 128, 128, 128, 128, 128,
365 128, 128, 128, 128, 128, 128, 128, 128,
366 128, 128, 128, 128, 128, 128, 128, 128,
367 128, 128, 128, 128, 128, 128, 128, 128,
368 128, 128, 128, 128, 128, 128, 128, 128,
369 128, 128, 128, 128, 128, 128, 128, 128,
370 128, 128, 128, 128, 128, 128, 128, 128,
371 128, 128, 128, 128, 128, 128, 128, 128,
372 128, 128, 128, 128, 128, 128, 128, 128,
373 128, 128, 128, 128, 128, 128, 128, 128,
374 128, 128, 128, 128, 128, 128, 128, 128 });
375
376 fill_tensor(cell_state, std::vector<int16_t> { 0, 0, 0, 0, 0, 0, 0, 0,
377 0, 0, 0, 0, 0, 0, 0, 0,
378 0, 0, 0, 0, 0, 0, 0, 0,
379 0, 0, 0, 0, 0, 0, 0, 0,
380 0, 0, 0, 0, 0, 0, 0, 0,
381 0, 0, 0, 0, 0, 0, 0, 0,
382 0, 0, 0, 0, 0, 0, 0, 0,
383 0, 0, 0, 0, 0, 0, 0, 0,
384 0, 0, 0, 0, 0, 0, 0, 0,
385 0, 0, 0, 0, 0, 0, 0, 0,
386 0, 0, 0, 0, 0, 0, 0, 0,
387 0, 0, 0, 0, 0, 0, 0, 0,
388 0, 0, 0, 0, 0, 0, 0, 0,
389 0, 0, 0, 0, 0, 0, 0, 0,
390 0, 0, 0, 0, 0, 0, 0, 0,
391 0, 0, 0, 0, 0, 0, 0, 0});
392
393 // First input
394 fill_tensor(input, std::vector<uint8_t> { 247, 203, 159, 131, 182, 114, 207, 195,
395 48 , 61 , 154, 16, 80, 101, 116, 255,
396 50 , 115 , 45, 186, 75, 212, 98, 48,
397 88 , 146 , 24, 143, 218, 174, 203, 200,
398 239 , 16 , 66, 136, 234, 54, 94, 51,
399 101 , 128 , 220, 213, 164, 82, 137, 255,
400 70 , 165 , 234, 220, 66, 35, 183, 206,
401 39 , 57 , 180, 202, 23, 172, 224, 109,
402 102 , 215 , 186, 82, 215, 147, 85, 187,
403 96 , 249 , 59, 116, 150, 44, 167, 128,
404 34 , 217 , 148, 193, 243, 38, 250, 208,
405 112 , 130 , 208, 29, 16, 122, 20, 92,
406 24 , 72 , 104, 29, 150, 233, 151, 19,
407 158 , 192 , 254, 70, 73, 142, 106, 152,
408 3 , 61 , 24, 135, 212, 9, 80, 234,
409 147 , 246 , 83, 249, 49, 14, 68, 50});
410
411 fill_tensor(expected_output, std::vector<uint8_t> {131, 128, 128, 128, 128, 180, 129, 133,
412 136, 128, 126, 128, 128, 173, 135, 130,
413 160, 128, 128, 128, 128, 138, 132, 129,
414 131, 128, 127, 128, 128, 169, 129, 131,
415 133, 128, 128, 128, 128, 182, 130, 129,
416 131, 128, 128, 128, 128, 163, 129, 130,
417 131, 128, 128, 128, 128, 149, 132, 129,
418 143, 128, 127, 128, 128, 150, 134, 131,
419 134, 128, 128, 128, 128, 167, 130, 130,
420 131, 128, 128, 128, 128, 152, 132, 129,
421 128, 128, 128, 128, 128, 169, 130, 130,
422 173, 128, 128, 128, 128, 148, 139, 130,
423 152, 128, 128, 128, 128, 168, 139, 132,
424 147, 128, 128, 128, 128, 161, 131, 132,
425 130, 128, 128, 128, 128, 159, 134, 128,
426 140, 128, 128, 128, 128, 133, 132, 128 });
427
428 lstmq.run();
429 validate(Accessor(output_state), expected_output);
430
431 // Second input
432 fill_tensor(expected_output, std::vector<uint8_t> { 130, 128, 128, 128, 128, 205, 129, 137,
433 135, 128, 127, 128, 128, 190, 137, 132,
434 160, 128, 128, 128, 128, 142, 133, 131,
435 130, 128, 128, 128, 128, 185, 129, 133,
436 132, 128, 128, 128, 128, 198, 131, 130,
437 130, 128, 128, 128, 128, 178, 130, 131,
438 131, 128, 128, 128, 128, 158, 132, 131,
439 142, 128, 127, 128, 128, 158, 135, 134,
440 133, 128, 128, 128, 128, 178, 131, 132,
441 131, 128, 128, 128, 128, 160, 132, 130,
442 128, 128, 128, 128, 128, 190, 131, 131,
443 170, 128, 128, 128, 128, 157, 142, 131,
444 149, 128, 128, 128, 128, 178, 142, 135,
445 145, 128, 128, 128, 129, 173, 132, 135,
446 129, 128, 128, 128, 128, 171, 134, 129,
447 140, 128, 128, 128, 128, 135, 132, 129});
448 lstmq.run();
449 validate(Accessor(output_state), expected_output);
450}
451// clang-format on
452// *INDENT-ON*
453
454TEST_SUITE_END() // LSTMLayerQuantized
455TEST_SUITE_END() // NEON
456} // namespace validation
457} // namespace test
458} // namespace arm_compute