/*
 * Copyright (c) 2019 ARM Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/NEON/functions/NELSTMLayerQuantized.h"

#include "tests/NEON/Accessor.h"
#include "tests/PaddingCalculator.h"
#include "tests/Utils.h"
#include "tests/datasets/LSTMLayerDataset.h"
#include "tests/framework/Asserts.h"
#include "tests/framework/Macros.h"
#include "tests/framework/datasets/Datasets.h"
#include "tests/validation/Validation.h"

#include <cstring>
#include <vector>

37namespace arm_compute
38{
39namespace test
40{
41namespace validation
42{
43namespace
44{
45template <typename T>
46inline void fill_tensor(Tensor &tensor, const std::vector<T> &v)
47{
48 // Import memory accounting for padding
49 TensorShape t_shape = tensor.info()->tensor_shape();
50 Window window;
51 window.use_tensor_dimensions(t_shape);
52 Iterator out(&tensor, window);
53 execute_window_loop(window, [&](const Coordinates & id)
54 {
55 *reinterpret_cast<T *>(out.ptr()) = v[coord2index(t_shape, id)];
56 },
57 out);
58}
59
60template <typename T>
61inline void fill_tensor(SimpleTensor<T> &tensor, const std::vector<T> &v)
62{
63 std::memcpy(tensor.data(), v.data(), sizeof(T) * v.size());
64}
65
66} // namespace
67
68TEST_SUITE(NEON)
69TEST_SUITE(LSTMLayerQuantized)
70
71// *INDENT-OFF*
72// clang-format off
73TEST_CASE(IntegrationTestCaseSmall, framework::DatasetMode::PRECOMMIT)
74{
75 const int batch_size = 2;
76 const int input_size = 2;
77 const int output_size = 4;
78
79
80 QuantizationInfo qasymm(1.f / 128.f, 128);
81 QuantizationInfo qweights(1.f / 128.f, 128);
82 QuantizationInfo qsymm_3(8.f / 32768.f, 0);
83 QuantizationInfo qsymm_4(16.f / 32768.f, 0);
84
85 TensorShape input_shape{ input_size, batch_size };
86 TensorShape input_weights_shape{ input_size, output_size };
87 TensorShape recurrent_weights_shape{ output_size, output_size };
88 TensorShape output_shape{ output_size, batch_size};
89 TensorShape bias_shape{ output_size };
90
91 auto input_to_input_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
92 auto input_to_forget_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
93 auto input_to_cell_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
94 auto input_to_output_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
95 auto recurrent_to_input_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
96 auto recurrent_to_forget_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
97 auto recurrent_to_cell_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
98 auto recurrent_to_output_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
99 auto input_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
100 auto forget_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
101 auto cell_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
102 auto output_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
103
104 // LSTM input
105 auto input = create_tensor<Tensor>(input_shape, DataType::QASYMM8, 1, qasymm);
106
107 // LSTM output state
108 auto output_state = create_tensor<Tensor>(output_shape, DataType::QASYMM8, 1, qasymm);
109
110 // LSTM cell state
111 auto cell_state = create_tensor<Tensor>(output_shape, DataType::QSYMM16, 1, qsymm_4);
112
113 NELSTMLayerQuantized lstmq;
114
115 lstmq.configure(&input, &input_to_input_weights, &input_to_forget_weights, &input_to_cell_weights, &input_to_output_weights,
116 &recurrent_to_input_weights, &recurrent_to_forget_weights, &recurrent_to_cell_weights, &recurrent_to_output_weights,
117 &input_gate_bias, &forget_gate_bias, &cell_gate_bias, &output_gate_bias, &cell_state, &output_state, &cell_state, &output_state);
118
119 input.allocator()->allocate();
120 input_to_input_weights.allocator()->allocate();
121 input_to_forget_weights.allocator()->allocate();
122 input_to_cell_weights.allocator()->allocate();
123 input_to_output_weights.allocator()->allocate();
124 recurrent_to_input_weights.allocator()->allocate();
125 recurrent_to_forget_weights.allocator()->allocate();
126 recurrent_to_cell_weights.allocator()->allocate();
127 recurrent_to_output_weights.allocator()->allocate();
128 input_gate_bias.allocator()->allocate();
129 forget_gate_bias.allocator()->allocate();
130 cell_gate_bias.allocator()->allocate();
131 output_gate_bias.allocator()->allocate();
132 cell_state.allocator()->allocate();
133 output_state.allocator()->allocate();
Michalis Spyrouba27e442019-05-28 10:04:57 +0100134
135 // Fill weights and biases
136 fill_tensor(input_to_input_weights, std::vector<uint8_t>{ 47, 168,
137 66, 239,
138 6, 42,
139 237, 236 });
140
141 fill_tensor(input_to_forget_weights, std::vector<uint8_t> { 204, 193,
142 148, 59,
143 113, 17,
144 66, 197 });
145
146 fill_tensor(input_to_cell_weights, std::vector<uint8_t> { 172, 101,
147 184, 209,
148 165, 82,
149 108, 209 });
150
151 fill_tensor(input_to_output_weights, std::vector<uint8_t> { 203, 244,
152 219, 114,
153 130, 16,
154 163, 222 });
155
156 fill_tensor(recurrent_to_input_weights, std::vector<uint8_t> { 162, 168, 7, 95,
157 91, 155, 108, 216,
158 255, 100, 48, 188,
159 58, 37, 186, 147 });
160
161 fill_tensor(recurrent_to_forget_weights, std::vector<uint8_t> { 46, 58, 47, 170,
162 246, 96, 12, 99,
163 68, 23, 186, 161,
164 237, 164, 89, 6 });
165
166 fill_tensor(recurrent_to_cell_weights, std::vector<uint8_t> { 234, 99, 71, 206,
167 205, 159, 64, 253,
168 191, 148, 116, 8,
169 209, 136, 59, 138 });
170
171 fill_tensor(recurrent_to_output_weights, std::vector<uint8_t> { 23, 241, 137, 36,
172 206, 5, 227, 56,
173 254, 176, 231, 47,
174 18, 201, 161, 11 });
175
176 fill_tensor(input_gate_bias, std::vector<int> {-103038, 30525, 115255, -38154 });
177 fill_tensor(forget_gate_bias, std::vector<int> { -23428, 126970, 116806, 46307 });
178 fill_tensor(cell_gate_bias, std::vector<int> { 128006, 69949, -42808, 42568 });
179 fill_tensor(output_gate_bias, std::vector<int> { -67066, -53607, 47233, 7300 });
180
181 SimpleTensor<uint8_t> expected_output(output_shape, DataType::QASYMM8, 1, qasymm);
182
183 // Initialize state
184 fill_tensor(output_state, std::vector<uint8_t> { 128, 128, 128, 128,
185 128, 128, 128, 128 });
186 fill_tensor(cell_state, std::vector<int16_t> { 0, 0, 0, 0,
187 0, 0, 0, 0 });
188
189 // First input
190 fill_tensor(input, std::vector<uint8_t> { 106, 193,
191 155, 150 });
192
193 fill_tensor(expected_output, std::vector<uint8_t> { 128, 130, 36, 134,
194 128, 131, 35, 133 });
195
196 lstmq.run();
197 validate(Accessor(output_state), expected_output);
198
199 // Second input
200 fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 12, 137,
201 128, 131, 10, 136 });
202 lstmq.run();
203 validate(Accessor(output_state), expected_output);
204
205 // Third input
206 fill_tensor(expected_output, std::vector<uint8_t> { 128, 129, 8, 140,
207 128, 130, 6, 138 });
208 lstmq.run();
209 validate(Accessor(output_state), expected_output);
210}
211
212TEST_CASE(IntegrationTestCaseLarge, framework::DatasetMode::PRECOMMIT)
213{
214 const int batch_size = 16;
215 const int input_size = 8;
216 const int output_size = 8;
217
218
219 QuantizationInfo qasymm(1.f / 128.f, 128);
220 QuantizationInfo qweights(1.f / 128.f, 128);
221 QuantizationInfo qsymm_3(8.f / 32768.f, 0);
222 QuantizationInfo qsymm_4(16.f / 32768.f, 0);
223
224 TensorShape input_shape{ input_size, batch_size };
225 TensorShape input_weights_shape{ input_size, output_size };
226 TensorShape recurrent_weights_shape{ output_size, output_size };
227 TensorShape output_shape{ output_size, batch_size};
228 TensorShape bias_shape{ output_size };
229
230 auto input_to_input_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
231 auto input_to_forget_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
232 auto input_to_cell_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
233 auto input_to_output_weights = create_tensor<Tensor>(input_weights_shape, DataType::QASYMM8, 1, qweights);
234 auto recurrent_to_input_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
235 auto recurrent_to_forget_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
236 auto recurrent_to_cell_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
237 auto recurrent_to_output_weights = create_tensor<Tensor>(recurrent_weights_shape, DataType::QASYMM8, 1, qweights);
238 auto input_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
239 auto forget_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
240 auto cell_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
241 auto output_gate_bias = create_tensor<Tensor>(bias_shape, DataType::S32);
242
243 // LSTM input
244 auto input = create_tensor<Tensor>(input_shape, DataType::QASYMM8, 1, qasymm);
245
246 // LSTM output state
247 auto output_state = create_tensor<Tensor>(output_shape, DataType::QASYMM8, 1, qasymm);
248
249 // LSTM cell state
250 auto cell_state = create_tensor<Tensor>(output_shape, DataType::QSYMM16, 1, qsymm_4);
251
252 NELSTMLayerQuantized lstmq;
253
254 lstmq.configure(&input, &input_to_input_weights, &input_to_forget_weights, &input_to_cell_weights, &input_to_output_weights,
255 &recurrent_to_input_weights, &recurrent_to_forget_weights, &recurrent_to_cell_weights, &recurrent_to_output_weights,
256 &input_gate_bias, &forget_gate_bias, &cell_gate_bias, &output_gate_bias, &cell_state, &output_state, &cell_state, &output_state);
257
258 input.allocator()->allocate();
259 input_to_input_weights.allocator()->allocate();
260 input_to_forget_weights.allocator()->allocate();
261 input_to_cell_weights.allocator()->allocate();
262 input_to_output_weights.allocator()->allocate();
263 recurrent_to_input_weights.allocator()->allocate();
264 recurrent_to_forget_weights.allocator()->allocate();
265 recurrent_to_cell_weights.allocator()->allocate();
266 recurrent_to_output_weights.allocator()->allocate();
267 input_gate_bias.allocator()->allocate();
268 forget_gate_bias.allocator()->allocate();
269 cell_gate_bias.allocator()->allocate();
270 output_gate_bias.allocator()->allocate();
271 cell_state.allocator()->allocate();
272 output_state.allocator()->allocate();
273
274 // Fill weights and biases
275 fill_tensor(input_to_input_weights, std::vector<uint8_t>{ 141, 89, 200, 180, 46, 50, 87, 128,
276 149, 227, 177, 187, 212, 229, 54, 111,
277 131, 116, 3, 58, 196, 26, 131, 255,
278 22, 106, 216, 69, 239, 12, 232, 207,
279 184, 56, 236, 172, 28, 143, 161, 124,
280 255, 33, 197, 122, 47, 197, 26, 229,
281 91, 79, 11, 160, 26, 80, 100, 36,
282 248, 186, 97, 61, 125, 46, 14, 100, });
283
284 fill_tensor(input_to_forget_weights, std::vector<uint8_t> { 237, 165, 141, 249, 72, 116, 36 , 115,
285 234, 213, 85, 84, 59, 62, 150, 246,
286 182, 102, 158, 214, 182, 183, 94, 11,
287 158, 192, 92, 189, 160, 219, 206, 249,
288 88, 213, 193, 244, 151, 72, 129, 49,
289 239, 83, 106, 9, 169, 187, 125, 171,
290 32, 141, 126, 92, 13, 36, 224, 150,
291 187, 250, 178, 169, 89, 214, 91, 173 });
292
293 fill_tensor(input_to_cell_weights, std::vector<uint8_t> { 93, 103, 226, 139, 185, 252, 129, 171,
294 159, 32, 25, 175, 224, 183, 165, 35,
295 207, 69, 238, 228, 149, 214, 79, 6,
296 5, 66, 102, 14, 19, 111, 36, 143,
297 22, 85, 13, 78, 236, 121, 122, 77,
298 249, 39, 88, 12, 205, 143, 93, 240,
299 167, 89, 188, 50, 73, 69, 201, 251,
300 59, 32, 203, 184, 139, 191, 199, 74});
301
302 fill_tensor(input_to_output_weights, std::vector<uint8_t> { 205, 7, 95, 104, 252, 143, 226, 73,
303 229, 114, 152, 171, 221, 153, 73, 229,
304 153, 165, 223, 239, 100, 38, 172, 211,
305 226, 133, 239, 207, 116, 230, 170, 100,
306 241, 95, 171, 124, 63, 115, 32, 127,
307 141, 239, 53, 193, 201, 53, 104, 178,
308 186, 212, 167, 107, 226, 230, 71, 213,
309 148, 217, 19, 248, 233, 195, 183, 156 });
310
311 fill_tensor(recurrent_to_input_weights, std::vector<uint8_t> { 147, 112, 140, 103, 3, 255, 17, 49,
312 84, 112, 144, 213, 138, 142, 112, 66,
313 117, 30, 101, 35, 25, 132, 211, 229,
314 183, 208, 102, 16, 38, 85, 101, 152,
315 226, 83, 132, 22, 161, 110, 157, 129,
316 184, 63, 168, 42, 220, 126, 209, 157,
317 5, 88, 243, 83, 249, 19, 226, 209,
318 173, 96, 185, 77, 146, 227, 238, 136 });
319
320
321 fill_tensor(recurrent_to_forget_weights, std::vector<uint8_t> { 52, 132, 92, 200, 213, 32, 213, 37,
322 116, 142, 116, 180, 4, 172, 158, 143,
323 110, 40, 99, 28, 221, 153, 133, 2,
324 247, 144, 198, 100, 20, 15, 221, 196,
325 159, 178, 188, 151, 171, 15, 25, 217,
326 178, 109, 110, 118, 128, 39, 232, 234,
327 184, 214, 177, 13, 56, 6, 28, 252,
328 89, 187, 242, 59, 146, 111, 132, 129});
329
330 fill_tensor(recurrent_to_cell_weights, std::vector<uint8_t> { 70, 44, 137, 29, 36, 127, 1, 241,
331 26, 241, 142, 114, 67, 181, 49, 57,
332 131, 152, 175, 77, 23, 63, 37, 124,
333 150, 113, 95, 103, 110, 201, 69, 97,
334 196, 242, 62, 214, 66, 19, 45, 135,
335 22, 168, 149, 104, 77, 101, 36, 68,
336 170, 116, 222, 100, 109, 1, 154, 18,
337 133, 215, 105, 93, 31, 57, 231, 112 });
338
339
340 fill_tensor(recurrent_to_output_weights, std::vector<uint8_t> { 45 , 181 , 220 , 219 , 49 , 63 , 49 , 129,
341 7 , 166 , 104 , 114 , 83 , 40 , 1 , 195,
342 245 , 142 , 82 , 232 , 104 , 245 , 82 , 196,
343 111 , 56 , 156 , 9 , 141 , 240 , 180 , 148,
344 247 , 198 , 234 , 137 , 13 , 210 , 161 , 192,
345 196 , 59 , 233 , 184 , 142 , 187 , 140 , 166,
346 2 , 95 , 152 , 46 , 71 , 46 , 113 , 32,
347 175 , 229 , 86 , 87 , 62 , 93 , 74 , 130});
348
349 fill_tensor(input_gate_bias, std::vector<int> { -40040, -106916, -92315, -79123, 45160, -17954, 50962, -63758 });
350 fill_tensor(forget_gate_bias, std::vector<int> { -128514, 8463, -57831, 116977, 106547, -28132, -124557, 44941 });
351 fill_tensor(cell_gate_bias, std::vector<int> { 88388 , 123601, -116148, -13022, 21619, 48926, 57523, 39332 });
352 fill_tensor(output_gate_bias, std::vector<int> { 59485 , -33070, 21386, -100633, -115959, 125768, -56407, 24897 });
353
354 SimpleTensor<uint8_t> expected_output(output_shape, DataType::QASYMM8, 1, qasymm);
355
356 // Initialize state
357 fill_tensor(output_state, std::vector<uint8_t> { 128, 128, 128, 128, 128, 128, 128, 128,
358 128, 128, 128, 128, 128, 128, 128, 128,
359 128, 128, 128, 128, 128, 128, 128, 128,
360 128, 128, 128, 128, 128, 128, 128, 128,
361 128, 128, 128, 128, 128, 128, 128, 128,
362 128, 128, 128, 128, 128, 128, 128, 128,
363 128, 128, 128, 128, 128, 128, 128, 128,
364 128, 128, 128, 128, 128, 128, 128, 128,
365 128, 128, 128, 128, 128, 128, 128, 128,
366 128, 128, 128, 128, 128, 128, 128, 128,
367 128, 128, 128, 128, 128, 128, 128, 128,
368 128, 128, 128, 128, 128, 128, 128, 128,
369 128, 128, 128, 128, 128, 128, 128, 128,
370 128, 128, 128, 128, 128, 128, 128, 128,
371 128, 128, 128, 128, 128, 128, 128, 128,
372 128, 128, 128, 128, 128, 128, 128, 128 });
373
374 fill_tensor(cell_state, std::vector<int16_t> { 0, 0, 0, 0, 0, 0, 0, 0,
375 0, 0, 0, 0, 0, 0, 0, 0,
376 0, 0, 0, 0, 0, 0, 0, 0,
377 0, 0, 0, 0, 0, 0, 0, 0,
378 0, 0, 0, 0, 0, 0, 0, 0,
379 0, 0, 0, 0, 0, 0, 0, 0,
380 0, 0, 0, 0, 0, 0, 0, 0,
381 0, 0, 0, 0, 0, 0, 0, 0,
382 0, 0, 0, 0, 0, 0, 0, 0,
383 0, 0, 0, 0, 0, 0, 0, 0,
384 0, 0, 0, 0, 0, 0, 0, 0,
385 0, 0, 0, 0, 0, 0, 0, 0,
386 0, 0, 0, 0, 0, 0, 0, 0,
387 0, 0, 0, 0, 0, 0, 0, 0,
388 0, 0, 0, 0, 0, 0, 0, 0,
389 0, 0, 0, 0, 0, 0, 0, 0});
390
391 // First input
392 fill_tensor(input, std::vector<uint8_t> { 247, 203, 159, 131, 182, 114, 207, 195,
393 48 , 61 , 154, 16, 80, 101, 116, 255,
394 50 , 115 , 45, 186, 75, 212, 98, 48,
395 88 , 146 , 24, 143, 218, 174, 203, 200,
396 239 , 16 , 66, 136, 234, 54, 94, 51,
397 101 , 128 , 220, 213, 164, 82, 137, 255,
398 70 , 165 , 234, 220, 66, 35, 183, 206,
399 39 , 57 , 180, 202, 23, 172, 224, 109,
400 102 , 215 , 186, 82, 215, 147, 85, 187,
401 96 , 249 , 59, 116, 150, 44, 167, 128,
402 34 , 217 , 148, 193, 243, 38, 250, 208,
403 112 , 130 , 208, 29, 16, 122, 20, 92,
404 24 , 72 , 104, 29, 150, 233, 151, 19,
405 158 , 192 , 254, 70, 73, 142, 106, 152,
406 3 , 61 , 24, 135, 212, 9, 80, 234,
407 147 , 246 , 83, 249, 49, 14, 68, 50});
408
409 fill_tensor(expected_output, std::vector<uint8_t> {131, 128, 128, 128, 128, 180, 129, 133,
410 136, 128, 126, 128, 128, 173, 135, 130,
411 160, 128, 128, 128, 128, 138, 132, 129,
412 131, 128, 127, 128, 128, 169, 129, 131,
413 133, 128, 128, 128, 128, 182, 130, 129,
414 131, 128, 128, 128, 128, 163, 129, 130,
415 131, 128, 128, 128, 128, 149, 132, 129,
416 143, 128, 127, 128, 128, 150, 134, 131,
417 134, 128, 128, 128, 128, 167, 130, 130,
418 131, 128, 128, 128, 128, 152, 132, 129,
419 128, 128, 128, 128, 128, 169, 130, 130,
420 173, 128, 128, 128, 128, 148, 139, 130,
421 152, 128, 128, 128, 128, 168, 139, 132,
422 147, 128, 128, 128, 128, 161, 131, 132,
423 130, 128, 128, 128, 128, 159, 134, 128,
424 140, 128, 128, 128, 128, 133, 132, 128 });
425
426 lstmq.run();
427 validate(Accessor(output_state), expected_output);
428
429 // Second input
430 fill_tensor(expected_output, std::vector<uint8_t> { 130, 128, 128, 128, 128, 205, 129, 137,
431 135, 128, 127, 128, 128, 190, 137, 132,
432 160, 128, 128, 128, 128, 142, 133, 131,
433 130, 128, 128, 128, 128, 185, 129, 133,
434 132, 128, 128, 128, 128, 198, 131, 130,
435 130, 128, 128, 128, 128, 178, 130, 131,
436 131, 128, 128, 128, 128, 158, 132, 131,
437 142, 128, 127, 128, 128, 158, 135, 134,
438 133, 128, 128, 128, 128, 178, 131, 132,
439 131, 128, 128, 128, 128, 160, 132, 130,
440 128, 128, 128, 128, 128, 190, 131, 131,
441 170, 128, 128, 128, 128, 157, 142, 131,
442 149, 128, 128, 128, 128, 178, 142, 135,
443 145, 128, 128, 128, 129, 173, 132, 135,
444 129, 128, 128, 128, 128, 171, 134, 129,
445 140, 128, 128, 128, 128, 135, 132, 129});
446 lstmq.run();
447 validate(Accessor(output_state), expected_output);
448}
449// clang-format on
450// *INDENT-ON*
451
452TEST_SUITE_END() // LSTMLayerQuantized
Manuel Bottini10c53f12019-07-17 16:11:53 +0100453TEST_SUITE_END() // CL
Michalis Spyrouba27e442019-05-28 10:04:57 +0100454} // namespace validation
455} // namespace test
456} // namespace arm_compute