1//
2// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "UnidirectionalSequenceLstmTestHelper.hpp"
7
8#include <armnn_delegate.hpp>
9
10#include <flatbuffers/flatbuffers.h>
11#include <tensorflow/lite/schema/schema_generated.h>
12#include <doctest/doctest.h>
13
14namespace armnnDelegate
15{
16
17void UnidirectionalSequenceLstmTest(std::vector<armnn::BackendId>& backends)
18{
19 int32_t batchSize = 3;
20 int32_t timeSize = 2;
21 int32_t inputSize = 3;
22 int32_t outputSize = 4;
23 // cellSize and outputSize have the same size when there is no projection.
24 int32_t numUnits = outputSize;
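    // Weight tensor sizes follow from these dimensions: each input-to-gate weight holds
    // numUnits * inputSize (12) values, each recurrent-to-gate weight holds
    // numUnits * outputSize (16) values, and each gate bias holds numUnits (4) values.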
25
26 //tensorInfo12,
27 bool hasInputToInputWeights = true;
28 std::vector<float> inputToInputWeights = { -0.49536117f, -0.0556083915f, -0.102400711f,
29 -0.117484632f, 0.3298470976f, -0.1179017122f,
30 0.214305695f, 0.42135173085f, 0.003878414626f,
31 -0.348303917f, -0.1881275477f, 0.0343011027f };
32
33 std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
34 -0.3810434485f, 0.268383264f, -0.009807467424f,
35 -0.3522925403f, -0.24275735512f, -0.28344226125f,
36 0.13512269116f, -0.4932442977f, -0.10039821991f };
37
38 std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
39 0.386399507f, -0.259465157985f, -0.16545993089f,
40 -0.4230232555f, 0.341664791103f, -0.18127849691f,
41 -0.2277662414f, -0.55275535589f, 0.34184026718f };
42
43 std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
44 0.53969591851f, 0.23393625035f, -0.27140527306f,
45 0.50009280443f, 0.07511717046f, 0.3998299249f,
46 -0.51717478049f, 0.1889653282f, -0.367323637f };
47
48 //tensorInfo16,
49 bool hasRecurrentToInputWeights = true;
50 std::vector<float> recurrentToInputWeights = { -0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
51 -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
52 0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
53 0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f };
54
55 std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
56 -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
57 -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
58 -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f };
59
60 std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
61 -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
62 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
63 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f };
64
65 std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
66 -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
67 0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
68 -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f };
69 // tensorInfo4
70 bool hasCellToInputWeights = false;
71 std::vector<float> cellToInputWeights;
72 bool hasCellToForgetWeights = false;
73 std::vector<float> cellToForgetWeights;
74 bool hasCellToOutputWeights = false;
75 std::vector<float> cellToOutputWeights;
76
77 bool hasInputGateBias = true;
78 std::vector<float> inputGateBias = {0., 0., 0., 0.};
79 std::vector<float> forgetGateBias = {1., 1., 1., 1.};
80 std::vector<float> cellBias = {0., 0., 0., 0.};
81 std::vector<float> outputGateBias = {0., 0., 0., 0.};
82
83 bool hasProjectionWeights = false;
84 std::vector<float> projectionWeights;
85 bool hasProjectionBias = false;
86 std::vector<float> projectionBias;
87
88 bool hasInputLayerNormWeights = false;
89 std::vector<float> inputLayerNormWeights;
90 bool hasForgetLayerNormWeights = false;
91 std::vector<float> forgetLayerNormWeights;
92 bool hasCellLayerNormWeights = false;
93 std::vector<float> cellLayerNormWeights;
94 bool hasOutputLayerNormWeights = false;
95 std::vector<float> outputLayerNormWeights;
96
97 std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
98 3., 2., 1., 2., 3., 4.,
99 5., 4., 3., 2., 1., 2. };
100 std::vector<float> expectedOutputValues = { -0.0714901f, -0.162117f, -0.175168f, -0.0232934f,
101 -0.168107f, -0.414129f, -0.549875f, -0.00803579f,
102 -0.0668735f, 0.204078f, -0.42765f, -0.0312321f,
103 -0.120003f, -0.0941918f, -0.456391f, -0.0287019f,
104 -0.0342921f, 0.20824f, -0.656989f, -0.00415265f,
105 -0.10493f, 0.14211f, -0.583478f, -0.0329754f };
106
107 tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
108 float clippingThresCell = 10.f;
109 float clippingThresProj = 0.f;
110 bool isTimeMajor = false;
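    // TANH is the conventional LSTM output activation; clippingThresCell clips the cell
    // state to [-10, 10], while a projection clipping threshold of 0 disables projection
    // clipping. With isTimeMajor == false the input is laid out as
    // [batchSize, timeSize, inputSize].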
111
112 UnidirectionalSequenceLstmTestImpl<float>(backends,
113 ::tflite::TensorType_FLOAT32,
114 batchSize,
115 timeSize,
116 inputSize,
117 outputSize,
118 numUnits,
119 hasInputToInputWeights,
120 inputToInputWeights,
121 inputToForgetWeights,
122 inputToCellWeights,
123 inputToOutputWeights,
124 hasRecurrentToInputWeights,
125 recurrentToInputWeights,
126 recurrentToForgetWeights,
127 recurrentToCellWeights,
128 recurrentToOutputWeights,
129 hasCellToInputWeights,
130 cellToInputWeights,
131 hasCellToForgetWeights,
132 cellToForgetWeights,
133 hasCellToOutputWeights,
134 cellToOutputWeights,
135 hasInputGateBias,
136 inputGateBias,
137 forgetGateBias,
138 cellBias,
139 outputGateBias,
140 hasProjectionWeights,
141 projectionWeights,
142 hasProjectionBias,
143 projectionBias,
144 hasInputLayerNormWeights,
145 inputLayerNormWeights,
146 hasForgetLayerNormWeights,
147 forgetLayerNormWeights,
148 hasCellLayerNormWeights,
149 cellLayerNormWeights,
150 hasOutputLayerNormWeights,
151 outputLayerNormWeights,
152 inputValues,
153 expectedOutputValues,
154 activationFunction,
155 clippingThresCell,
156 clippingThresProj,
157 isTimeMajor);
158}
159
160void UnidirectionalSequenceLstmTimeMajorTest(std::vector<armnn::BackendId>& backends)
161{
162 int32_t batchSize = 3;
163 int32_t timeSize = 2;
164 int32_t inputSize = 3;
165 int32_t outputSize = 4;
166 // cellSize and outputSize have the same size when there is no projection.
167 int32_t numUnits = outputSize;
168
169 std::vector<int32_t> inputShape = {timeSize, batchSize, inputSize};
170 std::vector<int32_t> cellStateInTensorInfo = {batchSize, numUnits};
171 std::vector<int32_t> outputStateInTensorInfo = {batchSize, outputSize};
172
173 std::vector<int32_t> outputTensorInfo = {timeSize, batchSize, outputSize};
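    // In time-major mode the sequence (time) dimension comes first, as the shapes above
    // show; the state tensors remain [batchSize, numUnits] and [batchSize, outputSize].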
174
175 //tensorInfo12
176 bool hasInputToInputWeights = true;
177 std::vector<float> inputToInputWeights = { 0.27277296781539917f, 0.3813590407371521f, -0.394489049911499f,
178 0.2782636880874634f, -0.3793870210647583f, -0.018918335437774658f,
179 0.2724653482437134f, -0.19314253330230713f, -0.2947450876235962f,
180 -0.30253493785858154f, 0.4241350293159485f, -0.22560018301010132f };
181
182 std::vector<float> inputToForgetWeights = { -0.2667974531650543f, -0.05505800247192383f, -0.20932340621948242f,
183 -0.14345619082450867f, 0.09666192531585693f, -0.2604355812072754f,
184 -0.2681812047958374f, -0.3314584493637085f, 0.4485899806022644f,
185 -0.23467743396759033f, 0.5072842240333557f, -0.4192768931388855f };
186
187 std::vector<float> inputToCellWeights = { -0.15782442688941956f, -0.027530014514923096f, 0.4789854884147644f,
188 0.23227906227111816f, 0.28259342908859253f, -0.030095696449279785f,
189 0.10071521997451782f, -0.08535495400428772f, 0.18563997745513916f,
190 -0.3049069046974182f, -0.478048175573349f, 0.025234103202819824f };
191
192 std::vector<float> inputToOutputWeights = { -0.04584759473800659f, -0.2716066539287567f, 0.012970447540283203f,
193 -0.4729190170764923f, -0.37422770261764526f, 0.49352723360061646f,
194 0.3163864016532898f, -0.436781644821167f, -0.33074596524238586f,
195 -0.32885751128196716f, -0.40959352254867554f, -0.2124689817428589f };
196
197 //tensorInfo16
198 bool hasRecurrentToInputWeights = true;
199 std::vector<float> recurrentToInputWeights = { 0.23788475990f, -0.24948765337f, 0.50044941902f, 0.14431896805f,
200 -0.115940228137f, -0.717082679f, -0.17208620906f, 0.17850610617f,
201 -0.16702319684f, -0.11384502053f, -0.309785276245f, -0.3316611672f,
202 0.52380162477f, -0.06839632987f, -0.391478359627f, -0.10756178963f };
203
204 std::vector<float> recurrentToForgetWeights = { 0.11383482068f, 0.1676601767f, -0.08550968004f, 0.03399394089f,
205 0.08042152225f, -0.2133381964f, 0.05182432704f, 0.38161808255f,
206 -0.5018365979f, -0.08043262364f, 0.07894329014f, -0.07547105155f,
207 0.12047368288f, 0.2986997961f, 0.0485043078f, -0.13372567296f };
208
209 std::vector<float> recurrentToCellWeights = { 0.0433832928545f, 0.07587072294f, -0.120520234107f, 0.604576051f,
210 -0.434353142986f, 0.009314475068f, 0.005085289478f, 0.08488202038f,
211 -0.00025437487886f, 0.15245915082f, -0.1936587542f, 0.004754020f,
212 -0.1582719236f, 0.3307867646f, 0.0236605107784f, 0.307716339826f };
213
214 std::vector<float> recurrentToOutputWeights = { -0.079031050201f, 0.041414566286f, -0.583727357285f, 0.1025384515f,
215 -0.172372072937f, 0.09214124082f, 0.178184121827f, -0.2439443916f,
216 0.104485116899f, 0.2600405514f, 0.064414866268f, 0.24141204357f,
217 0.281875759363f, -0.14234502664f, 0.15126448862f, -0.24421440064f };
218 // tensorInfo4
219 bool hasCellToInputWeights = false;
220 std::vector<float> cellToInputWeights;
221 bool hasCellToForgetWeights = false;
222 std::vector<float> cellToForgetWeights;
223 bool hasCellToOutputWeights = false;
224 std::vector<float> cellToOutputWeights;
225
226 bool hasInputGateBias = true;
227 std::vector<float> inputGateBias = {0., 0., 0., 0.};
228 std::vector<float> forgetGateBias = {1., 1., 1., 1.};
229 std::vector<float> cellBias = {0., 0., 0., 0.};
230 std::vector<float> outputGateBias = {0., 0., 0., 0.};
231
232 bool hasProjectionWeights = false;
233 std::vector<float> projectionWeights;
234 bool hasProjectionBias = false;
235 std::vector<float> projectionBias;
236
237 bool hasInputLayerNormWeights = false;
238 std::vector<float> inputLayerNormWeights;
239 bool hasForgetLayerNormWeights = false;
240 std::vector<float> forgetLayerNormWeights;
241 bool hasCellLayerNormWeights = false;
242 std::vector<float> cellLayerNormWeights;
243 bool hasOutputLayerNormWeights = false;
244 std::vector<float> outputLayerNormWeights;
245
246 std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
247 3., 2., 1., 2., 3., 4.,
248 5., 4., 3., 2., 1., 2. };
249 std::vector<float> expectedOutputValues = { 0.135658f, 0.124673f, 0.021209f, -0.0530204f,
250 0.106138f, 0.0404792f, 0.0151644f, -0.00675166f,
251 -0.0128514f, 0.0644884f, 0.0709072f, -0.0454045f,
252 0.162886f, 0.166494f, 0.0277046f, -0.0369807f,
253 0.111716f, 0.043119f, 0.0762981f, -0.0122854f,
254 0.104397f, 0.2144f, 0.119192f, -0.0839058f };
255
256 tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
257 float clippingThresCell = 10.f;
258 float clippingThresProj = 0.f;
259 bool isTimeMajor = true;
260
261 UnidirectionalSequenceLstmTestImpl<float>(backends,
262 ::tflite::TensorType_FLOAT32,
263 batchSize,
264 timeSize,
265 inputSize,
266 outputSize,
267 numUnits,
268 hasInputToInputWeights,
269 inputToInputWeights,
270 inputToForgetWeights,
271 inputToCellWeights,
272 inputToOutputWeights,
273 hasRecurrentToInputWeights,
274 recurrentToInputWeights,
275 recurrentToForgetWeights,
276 recurrentToCellWeights,
277 recurrentToOutputWeights,
278 hasCellToInputWeights,
279 cellToInputWeights,
280 hasCellToForgetWeights,
281 cellToForgetWeights,
282 hasCellToOutputWeights,
283 cellToOutputWeights,
284 hasInputGateBias,
285 inputGateBias,
286 forgetGateBias,
287 cellBias,
288 outputGateBias,
289 hasProjectionWeights,
290 projectionWeights,
291 hasProjectionBias,
292 projectionBias,
293 hasInputLayerNormWeights,
294 inputLayerNormWeights,
295 hasForgetLayerNormWeights,
296 forgetLayerNormWeights,
297 hasCellLayerNormWeights,
298 cellLayerNormWeights,
299 hasOutputLayerNormWeights,
300 outputLayerNormWeights,
301 inputValues,
302 expectedOutputValues,
303 activationFunction,
304 clippingThresCell,
305 clippingThresProj,
306 isTimeMajor);
307}
308
309void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(std::vector<armnn::BackendId>& backends)
310{
311 int32_t batchSize = 2;
312 int32_t timeSize = 3;
313 int32_t inputSize = 4;
314 int32_t outputSize = 5;
315 int32_t numUnits = 6;
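    // With a projection layer the cell size (numUnits) no longer has to match the
    // output size: here 6 cell units are projected down to 5 outputs.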
316
317 std::vector<int32_t> inputShape = {batchSize, timeSize, inputSize};
318 std::vector<int32_t> cellStateInTensorInfo = {batchSize, numUnits};
319 std::vector<int32_t> outputStateInTensorInfo = {batchSize, outputSize};
320
321 std::vector<int32_t> outputTensorInfo = {batchSize, timeSize, outputSize};
322
323 //tensorInfoInputSize,
324 bool hasInputToInputWeights = true;
325 std::vector<float> inputToInputWeights = { 0.021393683f, 0.06124551f, 0.046905167f, -0.014657677f,
326 -0.03149463f, 0.09171803f, 0.14647801f, 0.10797193f,
327 -0.0057968358f, 0.0019193048f, -0.2726754f, 0.10154029f,
328 -0.018539885f, 0.080349885f, -0.10262385f, -0.022599787f,
329 -0.09121155f, -0.008675967f, -0.045206103f, -0.0821282f,
330 -0.008045952f, 0.015478081f, 0.055217247f, 0.038719587f };
331
332 std::vector<float> inputToForgetWeights = { -0.0018401089f, -0.004852237f, 0.03698424f, 0.014181704f,
333 0.028273236f, -0.016726194f, -0.05249759f, -0.10204261f,
334 0.00861066f, -0.040979505f, -0.009899187f, 0.01923892f,
335 -0.028177269f, -0.08535103f, -0.14585495f, 0.10662567f,
336 -0.01909731f, -0.017883534f, -0.0047269356f, -0.045103323f,
337 0.0030784295f, 0.076784775f, 0.07463696f, 0.094531395f};
338
339 std::vector<float> inputToCellWeights = { -0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
340 -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
341 -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
342 -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
343 -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
344 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f };
345
346 std::vector<float> inputToOutputWeights = { -0.0998932f, -0.07201956f, -0.052803773f, -0.15629593f,
347 -0.15001918f, -0.07650751f, 0.02359855f, -0.075155355f,
348 -0.08037709f, -0.15093534f, 0.029517552f, -0.04751393f,
349 0.010350531f, -0.02664851f, -0.016839722f, -0.023121163f,
350 0.0077019283f, 0.012851257f, -0.05040649f, -0.0129761f,
351 -0.021737747f, -0.038305793f, -0.06870586f, -0.01481247f };
352
353 //tensorInfoOutputSize,
354 bool hasRecurrentToInputWeights = true;
355 std::vector<float> recurrentToInputWeights = { -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
356 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
357 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
358 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
359 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f,
360 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
361 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
362 0.14283475f, -0.07390571f };
363
364 std::vector<float> recurrentToForgetWeights = { -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
365 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
366 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
367 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
368 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f,
369 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
370 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
371 0.061878487f, -0.04729229f };
372
373 std::vector<float> recurrentToCellWeights = { -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
374 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
375 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
376 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
377 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
378 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
379 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
380 -0.019443132f, -0.030755889f };
381
382 std::vector<float> recurrentToOutputWeights = { 0.025825322f, -0.05813119f, 0.09495884f,
383 -0.045984812f,-0.01255415f, -0.0026479573f,
384 -0.08196161f, -0.054914974f, -0.0046604523f,
385 -0.029587349f, -0.044576716f, -0.07480124f,
386 -0.082868785f, 0.023254942f, 0.027502948f,
387 -0.0039728214f, -0.08683098f, -0.08116779f,
388 -0.014675607f, -0.037924774f, -0.023314456f,
389 -0.007401714f, -0.09255757f, 0.029460307f,
390 -0.08829125f, -0.005139627f, -0.08989442f,
391 -0.0555066f, 0.13596267f, 0.025062224f };
392 // tensorInfoNumUnits
393 bool hasCellToInputWeights = true;
394 std::vector<float> cellToInputWeights = { 0.040369894f, 0.030746894f, 0.24704495f,
395 0.018586371f, -0.037586458f, -0.15312155f };
396 bool hasCellToForgetWeights = true;
397 std::vector<float> cellToForgetWeights = { -0.01998659f, -0.15568835f, -0.24248174f,
398 -0.012770197f, 0.041331276f, -0.072311886f };
399 bool hasCellToOutputWeights = true;
400 std::vector<float> cellToOutputWeights = { 0.08286371f, -0.08261836f, -0.51210177f,
401 0.002913762f, 0.17764764f, -0.5495371f };
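    // Peephole connections: one weight per cell unit, letting each gate see the cell
    // state directly, so each of the vectors above has numUnits (6) elements.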
402
403 bool hasInputGateBias = true;
404 std::vector<float> inputGateBias = { 0.02234832f, 0.14757581f, 0.18176508f,
405 0.10380666f, 0.053110216f, -0.06928846f };
406 std::vector<float> forgetGateBias = { 0.035185695f, -0.042891346f, -0.03032477f,
407 0.23027696f, 0.11098921f, 0.08989442f };
408 std::vector<float> cellBias = { -0.024379363f, 0.0055531194f, 0.23377132f,
409 0.033463873f, -0.1483596f, 0.029460307f };
410 std::vector<float> outputGateBias = { 0.046159424f, -0.0012809046f, 0.03563469f,
411 0.12648113f, 0.027195795f, 0.35373217f };
412
413 bool hasProjectionWeights = true;
414 std::vector<float> projectionWeights = { -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
415 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
416 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
417 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
418 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
419 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f };
420
421 bool hasProjectionBias = true;
422 std::vector<float> projectionBias(outputSize, 0.f);
423
424 bool hasInputLayerNormWeights = false;
425 std::vector<float> inputLayerNormWeights;
426 bool hasForgetLayerNormWeights = false;
427 std::vector<float> forgetLayerNormWeights;
428 bool hasCellLayerNormWeights = false;
429 std::vector<float> cellLayerNormWeights;
430 bool hasOutputLayerNormWeights = false;
431 std::vector<float> outputLayerNormWeights;
432
433 std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
434 3., 2., 1., 2., 3., 4.,
435 5., 4., 3., 2., 1., 2.,
436 1., 2., 3., 4., 5., 4.};
437 std::vector<float> expectedOutputValues = { -0.0135612f, -0.0263441f, 0.0314008f, -0.00883455f, 0.00763052f,
438 -0.00126877f, -0.0292959f, 0.0449957f, -0.00976195f, -0.00492338f,
439 -0.0175702f, -0.0431753f, 0.0597117f, -0.0169154f, 0.0142087f,
440 0.00472515f, -0.0196355f, 0.0342524f, -0.00407936f, -0.0253189f,
441 -0.00512944f, -0.0293754f, 0.0512771f, -0.0151874f, -0.0246433f,
442 -0.00744986f, -0.0345103f, 0.0450666f, -0.00944991f, 0.0126895f };
443
444 tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
445 float clippingThresCell = 10.f;
446 float clippingThresProj = 0.f;
447 bool isTimeMajor = false;
448
449 UnidirectionalSequenceLstmTestImpl<float>(backends,
450 ::tflite::TensorType_FLOAT32,
451 batchSize,
452 timeSize,
453 inputSize,
454 outputSize,
455 numUnits,
456 hasInputToInputWeights,
457 inputToInputWeights,
458 inputToForgetWeights,
459 inputToCellWeights,
460 inputToOutputWeights,
461 hasRecurrentToInputWeights,
462 recurrentToInputWeights,
463 recurrentToForgetWeights,
464 recurrentToCellWeights,
465 recurrentToOutputWeights,
466 hasCellToInputWeights,
467 cellToInputWeights,
468 hasCellToForgetWeights,
469 cellToForgetWeights,
470 hasCellToOutputWeights,
471 cellToOutputWeights,
472 hasInputGateBias,
473 inputGateBias,
474 forgetGateBias,
475 cellBias,
476 outputGateBias,
477 hasProjectionWeights,
478 projectionWeights,
479 hasProjectionBias,
480 projectionBias,
481 hasInputLayerNormWeights,
482 inputLayerNormWeights,
483 hasForgetLayerNormWeights,
484 forgetLayerNormWeights,
485 hasCellLayerNormWeights,
486 cellLayerNormWeights,
487 hasOutputLayerNormWeights,
488 outputLayerNormWeights,
489 inputValues,
490 expectedOutputValues,
491 activationFunction,
492 clippingThresCell,
493 clippingThresProj,
494 isTimeMajor);
495}
496
497void UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(std::vector<armnn::BackendId>& backends)
498{
499 int32_t batchSize = 3;
500 int32_t timeSize = 2;
501 int32_t inputSize = 3;
502 int32_t outputSize = 4;
503 // cellSize and outputSize have the same size when there is no projection.
504 int32_t numUnits = outputSize;
505
506 //tensorInfo12
507 bool hasInputToInputWeights = false;
508 std::vector<float> inputToInputWeights{};
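    // CIFG (coupled input-forget gate): the input gate parameters are omitted and the
    // input gate is derived from the forget gate (roughly 1 - forget gate), so the
    // input-to-input, recurrent-to-input and input gate bias tensors stay empty.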
509
510 std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
511 -0.3810434485f, 0.268383264f, -0.009807467424f,
512 -0.3522925403f, -0.24275735512f, -0.28344226125f,
513 0.13512269116f, -0.4932442977f, -0.10039821991f };
514
515 std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
516 0.386399507f, -0.259465157985f, -0.16545993089f,
517 -0.4230232555f, 0.341664791103f, -0.18127849691f,
518 -0.2277662414f, -0.55275535589f, 0.34184026718f };
519
520 std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
521 0.53969591851f, 0.23393625035f, -0.27140527306f,
522 0.50009280443f, 0.07511717046f, 0.3998299249f,
523 -0.51717478049f, 0.1889653282f, -0.367323637f };
524
525 //tensorInfo16
526 bool hasRecurrentToInputWeights = false;
527 std::vector<float> recurrentToInputWeights{};
528
529 std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
530 -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
531 -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
532 -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f };
533
534 std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
535 -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
536 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
537 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f };
538
539 std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
540 -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
541 0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
542 -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f };
543 // tensorInfo4
544 bool hasCellToInputWeights = false;
545 std::vector<float> cellToInputWeights;
546 bool hasCellToForgetWeights = true;
547 std::vector<float> cellToForgetWeights = {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f};
548 bool hasCellToOutputWeights = true;
549 std::vector<float> cellToOutputWeights = {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f};
550
551 bool hasInputGateBias = false;
552 std::vector<float> inputGateBias;
553 std::vector<float> forgetGateBias = {1., 1., 1., 1.};
554 std::vector<float> cellBias = {0., 0., 0., 0.};
555 std::vector<float> outputGateBias = {0., 0., 0., 0.};
556
557 bool hasProjectionWeights = false;
558 std::vector<float> projectionWeights;
559 bool hasProjectionBias = false;
560 std::vector<float> projectionBias;
561
562 bool hasInputLayerNormWeights = false;
563 std::vector<float> inputLayerNormWeights;
564 bool hasForgetLayerNormWeights = false;
565 std::vector<float> forgetLayerNormWeights;
566 bool hasCellLayerNormWeights = false;
567 std::vector<float> cellLayerNormWeights;
568 bool hasOutputLayerNormWeights = false;
569 std::vector<float> outputLayerNormWeights;
570
571 std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
572 3., 2., 1., 2., 3., 4.,
573 5., 4., 3., 2., 1., 2. };
574 std::vector<float> expectedOutputValues = { -0.0129257f, -0.070531f, -0.153508f, -0.0392391f,
575 -0.0300169f, -0.195717f, -0.528679f, -0.0818106f,
576 -0.0332748f, 0.155429f, -0.353966f, -0.0801505f,
577 -0.032312f, -0.0407911f, -0.435053f, -0.0932317f,
578 -0.0108233f, 0.165584f, -0.640424f, -0.0447535f,
579 -0.031675f, 0.125987f, -0.526695f, -0.110093f };
580
581 tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
582 float clippingThresCell = 10.f;
583 float clippingThresProj = 0.f;
584 bool isTimeMajor = false;
585
586 UnidirectionalSequenceLstmTestImpl<float>(backends,
587 ::tflite::TensorType_FLOAT32,
588 batchSize,
589 timeSize,
590 inputSize,
591 outputSize,
592 numUnits,
593 hasInputToInputWeights,
594 inputToInputWeights,
595 inputToForgetWeights,
596 inputToCellWeights,
597 inputToOutputWeights,
598 hasRecurrentToInputWeights,
599 recurrentToInputWeights,
600 recurrentToForgetWeights,
601 recurrentToCellWeights,
602 recurrentToOutputWeights,
603 hasCellToInputWeights,
604 cellToInputWeights,
605 hasCellToForgetWeights,
606 cellToForgetWeights,
607 hasCellToOutputWeights,
608 cellToOutputWeights,
609 hasInputGateBias,
610 inputGateBias,
611 forgetGateBias,
612 cellBias,
613 outputGateBias,
614 hasProjectionWeights,
615 projectionWeights,
616 hasProjectionBias,
617 projectionBias,
618 hasInputLayerNormWeights,
619 inputLayerNormWeights,
620 hasForgetLayerNormWeights,
621 forgetLayerNormWeights,
622 hasCellLayerNormWeights,
623 cellLayerNormWeights,
624 hasOutputLayerNormWeights,
625 outputLayerNormWeights,
626 inputValues,
627 expectedOutputValues,
628 activationFunction,
629 clippingThresCell,
630 clippingThresProj,
631 isTimeMajor);
632}
633
634void UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest(
635 std::vector<armnn::BackendId>& backends)
636{
637 int32_t batchSize = 3;
638 int32_t timeSize = 2;
639 int32_t inputSize = 3;
640 int32_t outputSize = 4;
641 int32_t numUnits = 5;
642
643 //tensorInfo15
644 bool hasInputToInputWeights = true;
645 std::vector<float> inputToInputWeights = { -0.49536117f, -0.0556083915f, -0.102400711f,
646 -0.117484632f, 0.3298470976f, -0.1179017122f,
647 0.214305695f, 0.42135173085f, 0.003878414626f,
648 -0.348303917f, -0.1881275477f, 0.0343011027f,
649 -0.38837709614f, -0.05636804124f, 0.4259087456f};
650
651 std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
652 -0.3810434485f, 0.268383264f, -0.009807467424f,
653 -0.3522925403f, -0.24275735512f, -0.28344226125f,
654 0.13512269116f, -0.4932442977f, -0.10039821991f,
655 0.2726137042f, 0.09216640889f, -0.06551410215f};
656
657 std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
658 0.386399507f, -0.259465157985f, -0.16545993089f,
659 -0.4230232555f, 0.341664791103f, -0.18127849691f,
660 -0.2277662414f, -0.55275535589f, 0.34184026718f,
661 0.3954237699f, -0.19407111404f, 0.30412107706f};
662
663 std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
664 0.53969591851f, 0.23393625035f, -0.27140527306f,
665 0.50009280443f, 0.07511717046f, 0.3998299249f,
666 -0.51717478049f, 0.1889653282f, -0.367323637f,
667 -0.12584099173f, -0.12319286912f, 0.2407919466f};
668
669 //tensorInfo20
670 bool hasRecurrentToInputWeights = true;
671 std::vector<float> recurrentToInputWeights = { -0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
672 -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
673 0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
674 0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f,
675 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f };
676
677 std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
678 -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
679 -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
680 -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f,
681 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f };
682
683 std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
684 -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
685 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
686 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f,
687 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f };
688
689 std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
690 -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
691 0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
692 -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f,
693 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f };
694 // tensorInfo5
695 bool hasCellToInputWeights = true;
696 std::vector<float> cellToInputWeights = { 0.05f, 0.1f, 0.25f, 0.15f, -0.02f };
697 bool hasCellToForgetWeights = true;
698 std::vector<float> cellToForgetWeights = { -0.02f, -0.15f, -0.25f, -0.03f, 0.15f };
699 bool hasCellToOutputWeights = true;
700 std::vector<float> cellToOutputWeights = { 0.1f, -0.1f, -0.5f, 0.05f, 0.01f };
701
702 bool hasInputGateBias = true;
703 std::vector<float> inputGateBias = { 0.03f, 0.15f, 0.22f, 0.38f, 0.05f };
704 std::vector<float> forgetGateBias = { 0.1f, -0.3f, -0.2f, 0.1f, 0.4f };
705 std::vector<float> cellBias = { -0.05f, 0.72f, 0.25f, 0.08f, 0.1f };
706 std::vector<float> outputGateBias = { 0.05f, -0.01f, 0.2f, 0.1f, -0.2f };
707
708 bool hasProjectionWeights = true;
709 std::vector<float> projectionWeights = { -0.1f, 0.2f, 0.01f, -0.2f,
710 0.1f, 0.5f, 0.3f, 0.08f,
711 0.07f, 0.2f, -0.4f, 0.2f,
712 0.5f, -0.4f, 0.3f, -0.2f,
713 0.3f, 0.08f, -0.07f, 0.2f}; //{outputSize, numUnits}
714 bool hasProjectionBias = true;
715    std::vector<float> projectionBias(outputSize, 0.f);
716
717 bool hasInputLayerNormWeights = true;
718 std::vector<float> inputLayerNormWeights = { 0.1f, 0.2f, 0.3f, 0.5f, 0.8f };
719 bool hasForgetLayerNormWeights = true;
720 std::vector<float> forgetLayerNormWeights = { 0.1f, 0.2f, 0.3f, 0.5f, 0.2f };
721 bool hasCellLayerNormWeights = true;
722 std::vector<float> cellLayerNormWeights = { 0.7f, 0.2f, 0.3f, 0.8f, 0.5f };
723 bool hasOutputLayerNormWeights = true;
724 std::vector<float> outputLayerNormWeights = { 0.6f, 0.2f, 0.2f, 0.5f, 0.1f };
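    // Layer normalization: one scale vector of numUnits (5) elements per gate, applied
    // to the normalized gate pre-activations before the gate activation.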
725
726 std::vector<float> inputValues = { 1., 2., 3., 4., 5., 4.,
727 3., 2., 1., 2., 3., 4.,
728 5., 4., 3., 2., 1., 2. };
729 std::vector<float> expectedOutputValues = { 0.0642256f, 0.0343966f, 0.184122f, 0.114717f,
730 0.11458f, 0.0407109f, 0.300327f, 0.174301f,
731 0.0864761f, 0.0362912f, 0.178635f, 0.115689f,
732 0.108008f, 0.0386623f, 0.273471f, 0.167115f,
733 0.0859545f, 0.0331481f, 0.186051f, 0.11888f,
734 0.106649f, 0.0276847f, 0.229863f, 0.166958f };
735
736 tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
737 float clippingThresCell = 10.f;
738 float clippingThresProj = 0.f;
739 bool isTimeMajor = false;
740
741 UnidirectionalSequenceLstmTestImpl<float>(backends,
742 ::tflite::TensorType_FLOAT32,
743 batchSize,
744 timeSize,
745 inputSize,
746 outputSize,
747 numUnits,
748 hasInputToInputWeights,
749 inputToInputWeights,
750 inputToForgetWeights,
751 inputToCellWeights,
752 inputToOutputWeights,
753 hasRecurrentToInputWeights,
754 recurrentToInputWeights,
755 recurrentToForgetWeights,
756 recurrentToCellWeights,
757 recurrentToOutputWeights,
758 hasCellToInputWeights,
759 cellToInputWeights,
760 hasCellToForgetWeights,
761 cellToForgetWeights,
762 hasCellToOutputWeights,
763 cellToOutputWeights,
764 hasInputGateBias,
765 inputGateBias,
766 forgetGateBias,
767 cellBias,
768 outputGateBias,
769 hasProjectionWeights,
770 projectionWeights,
771 hasProjectionBias,
772 projectionBias,
773 hasInputLayerNormWeights,
774 inputLayerNormWeights,
775 hasForgetLayerNormWeights,
776 forgetLayerNormWeights,
777 hasCellLayerNormWeights,
778 cellLayerNormWeights,
779 hasOutputLayerNormWeights,
780 outputLayerNormWeights,
781 inputValues,
782 expectedOutputValues,
783 activationFunction,
784 clippingThresCell,
785 clippingThresProj,
786 isTimeMajor);
787}
788
789void UnidirectionalSequenceLstmInt8Test(std::vector<armnn::BackendId>& backends)
790{
791 int32_t batchSize = 3;
792 int32_t timeSize = 2;
793 int32_t inputSize = 3;
794 int32_t outputSize = 4;
795 // cellSize and outputSize have the same size when there is no projection.
796 int32_t numUnits = outputSize;
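    // In the int8 variants the gate and recurrent weights below are signed 8-bit
    // quantized values, while biases and input data remain float; the trailing 0.1f
    // argument passed to the test helper is presumably the quantization scale applied
    // to those weight tensors.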
797
798    //tensorInfo12
799 bool hasInputToInputWeights = true;
800 std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
801
802 std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
803
804 std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
805
806 std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
807
808 //tensorInfo16
809 bool hasRecurrentToInputWeights = true;
810 std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
811
812 std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
813
814 std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
815
816 std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
817
818 // tensorInfo4
819 bool hasCellToInputWeights = false;
820 std::vector<int8_t> cellToInputWeights;
821 bool hasCellToForgetWeights = false;
822 std::vector<int8_t> cellToForgetWeights;
823 bool hasCellToOutputWeights = false;
824 std::vector<int8_t> cellToOutputWeights;
825
826 bool hasInputGateBias = true;
827 std::vector<float> inputGateBias = { 0., 0., 0., 0. };
828 std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
829 std::vector<float> cellBias = { 0., 0., 0., 0. };
830 std::vector<float> outputGateBias = { 0., 0., 0., 0. };
831
832 bool hasProjectionWeights = false;
833 std::vector<int8_t> projectionWeights;
834 bool hasProjectionBias = false;
835 std::vector<float> projectionBias;
836
837 bool hasInputLayerNormWeights = false;
838 std::vector<float> inputLayerNormWeights;
839 bool hasForgetLayerNormWeights = false;
840 std::vector<float> forgetLayerNormWeights;
841 bool hasCellLayerNormWeights = false;
842 std::vector<float> cellLayerNormWeights;
843 bool hasOutputLayerNormWeights = false;
844 std::vector<float> outputLayerNormWeights;
845
846 std::vector<float> inputValues = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
847 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
848 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
849
850 std::vector<float> expectedOutputValues = { -0.0142517f, -0.0198845f, -0.0120569f, -0.0116868f,
851 -0.0350714f, -0.0343202f, -0.047504f, -0.0569789f,
852 -0.0146346f, 0.0106663f, -0.0247238f, -0.0319502f,
853 -0.0294759f, -0.0129935f, -0.0444175f, -0.0444354f,
854 -0.0280855f, 0.00545101f, -0.051422f, -0.0463838f,
855 -0.0310702f, 0.00915739f, -0.0625207f, -0.0482648f };
856
857 tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
858 float clippingThresCell = 10.f;
859 float clippingThresProj = 0.f;
860 bool isTimeMajor = false;
861
862 UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
863 ::tflite::TensorType_INT8,
864 batchSize,
865 timeSize,
866 inputSize,
867 outputSize,
868 numUnits,
869 hasInputToInputWeights,
870 inputToInputWeights,
871 inputToForgetWeights,
872 inputToCellWeights,
873 inputToOutputWeights,
874 hasRecurrentToInputWeights,
875 recurrentToInputWeights,
876 recurrentToForgetWeights,
877 recurrentToCellWeights,
878 recurrentToOutputWeights,
879 hasCellToInputWeights,
880 cellToInputWeights,
881 hasCellToForgetWeights,
882 cellToForgetWeights,
883 hasCellToOutputWeights,
884 cellToOutputWeights,
885 hasInputGateBias,
886 inputGateBias,
887 forgetGateBias,
888 cellBias,
889 outputGateBias,
890 hasProjectionWeights,
891 projectionWeights,
892 hasProjectionBias,
893 projectionBias,
894 hasInputLayerNormWeights,
895 inputLayerNormWeights,
896 hasForgetLayerNormWeights,
897 forgetLayerNormWeights,
898 hasCellLayerNormWeights,
899 cellLayerNormWeights,
900 hasOutputLayerNormWeights,
901 outputLayerNormWeights,
902 inputValues,
903 expectedOutputValues,
904 activationFunction,
905 clippingThresCell,
906 clippingThresProj,
907 isTimeMajor,
908 0.1f);
909}
910
911void UnidirectionalSequenceLstmInt8TimeMajorTest(std::vector<armnn::BackendId>& backends)
912{
913 int32_t batchSize = 3;
914 int32_t timeSize = 2;
915 int32_t inputSize = 3;
916 int32_t outputSize = 4;
917 // cellSize and outputSize have the same size when there is no projection.
918 int32_t numUnits = outputSize;
919
920 //tensorInfo12
921 bool hasInputToInputWeights = true;
922 std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
923
924 std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
925
926 std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
927
928 std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
929
930 //tensorInfo16
931 bool hasRecurrentToInputWeights = true;
932 std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
933
934 std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
935
936 std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
937
938 std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
939
940 // tensorInfo4
941 bool hasCellToInputWeights = false;
942 std::vector<int8_t> cellToInputWeights;
943 bool hasCellToForgetWeights = false;
944 std::vector<int8_t> cellToForgetWeights;
945 bool hasCellToOutputWeights = false;
946 std::vector<int8_t> cellToOutputWeights;
947
948 bool hasInputGateBias = true;
949 std::vector<float> inputGateBias = { 0., 0., 0., 0. };
950 std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
951 std::vector<float> cellBias = { 0., 0., 0., 0. };
952 std::vector<float> outputGateBias = { 0., 0., 0., 0. };
953
954 bool hasProjectionWeights = false;
955 std::vector<int8_t> projectionWeights;
956 bool hasProjectionBias = false;
957 std::vector<float> projectionBias;
958
959 bool hasInputLayerNormWeights = false;
960 std::vector<float> inputLayerNormWeights;
961 bool hasForgetLayerNormWeights = false;
962 std::vector<float> forgetLayerNormWeights;
963 bool hasCellLayerNormWeights = false;
964 std::vector<float> cellLayerNormWeights;
965 bool hasOutputLayerNormWeights = false;
966 std::vector<float> outputLayerNormWeights;
967
968 std::vector<float> inputValues = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
969 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
970 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
971
972 std::vector<float> expectedOutputValues = { -0.0142517f, -0.0198845f, -0.0120122f, -0.0116868f,
973 -0.0261295f, -0.0188487f, -0.0345463f, -0.049733f,
974 -0.0146346f, 0.0106663f, -0.0247238f, -0.0319502f,
975 -0.0291863f, -0.0369402f, -0.0354071f, -0.0296529f,
976 -0.0419539f, -0.00617731f, -0.0814796f, -0.0804005f,
977 -0.0244737f, 0.0119905f, -0.0457527f, -0.0331862f };
978
979 tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
980 float clippingThresCell = 10.f;
981 float clippingThresProj = 0.f;
982 bool isTimeMajor = true;
983
984 UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
985 ::tflite::TensorType_INT8,
986 batchSize,
987 timeSize,
988 inputSize,
989 outputSize,
990 numUnits,
991 hasInputToInputWeights,
992 inputToInputWeights,
993 inputToForgetWeights,
994 inputToCellWeights,
995 inputToOutputWeights,
996 hasRecurrentToInputWeights,
997 recurrentToInputWeights,
998 recurrentToForgetWeights,
999 recurrentToCellWeights,
1000 recurrentToOutputWeights,
1001 hasCellToInputWeights,
1002 cellToInputWeights,
1003 hasCellToForgetWeights,
1004 cellToForgetWeights,
1005 hasCellToOutputWeights,
1006 cellToOutputWeights,
1007 hasInputGateBias,
1008 inputGateBias,
1009 forgetGateBias,
1010 cellBias,
1011 outputGateBias,
1012 hasProjectionWeights,
1013 projectionWeights,
1014 hasProjectionBias,
1015 projectionBias,
1016 hasInputLayerNormWeights,
1017 inputLayerNormWeights,
1018 hasForgetLayerNormWeights,
1019 forgetLayerNormWeights,
1020 hasCellLayerNormWeights,
1021 cellLayerNormWeights,
1022 hasOutputLayerNormWeights,
1023 outputLayerNormWeights,
1024 inputValues,
1025 expectedOutputValues,
1026 activationFunction,
1027 clippingThresCell,
1028 clippingThresProj,
1029 isTimeMajor,
1030                                           0.1f);
1031}
1032
1033void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest(std::vector<armnn::BackendId>& backends)
1034{
1035 int32_t batchSize = 3;
1036 int32_t timeSize = 2;
1037 int32_t inputSize = 3;
1038 int32_t outputSize = 4;
1039 int32_t numUnits = 4;
1040
1041 bool hasInputToInputWeights = true;
1042 std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
1043
1044 std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
1045
1046 std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
1047
1048 std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
1049
1050 //tensorInfo16
1051 bool hasRecurrentToInputWeights = true;
1052 std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
1053
1054 std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
1055
1056 std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
1057
1058 std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
1059
1060 // tensorInfo4
1061 bool hasCellToInputWeights = true;
1062 std::vector<int8_t> cellToInputWeights = { 5, 10, 25, 15 };
1063 bool hasCellToForgetWeights = true;
1064 std::vector<int8_t> cellToForgetWeights = { -5, 15, 25, 3 };
1065 bool hasCellToOutputWeights = true;
1066 std::vector<int8_t> cellToOutputWeights = { 10, -10, -5, 50 };
1067
1068 bool hasInputGateBias = true;
1069 std::vector<float> inputGateBias = { 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f};
1070 std::vector<float> forgetGateBias = { 0.035185695f, -0.042891346f, -0.3032477f, 0.23027696f};
1071 std::vector<float> cellBias = { -0.124379363f, 0.55531194f, 0.23377132f, 0.033463873f };
1072 std::vector<float> outputGateBias = { 0.046159424f, -0.12809046f, 0.03563469f, 0.12648113f };
1073
1074 bool hasProjectionWeights = true;
1075 std::vector<int8_t> projectionWeights = { -25, 51, 3, -5, 25, 127, 77, 20, 18, 51, -10, 51, -25, 88, 77, -13 };
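    // The projection weights are also int8 quantized here, shaped {outputSize, numUnits}
    // (4 x 4 = 16 values), while the projection bias stays float.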
1076 bool hasProjectionBias = true;
1077 std::vector<float> projectionBias(outputSize, 0.f);
1078
1079 bool hasInputLayerNormWeights = false;
1080 std::vector<float> inputLayerNormWeights;
1081 bool hasForgetLayerNormWeights = false;
1082 std::vector<float> forgetLayerNormWeights;
1083 bool hasCellLayerNormWeights = false;
1084 std::vector<float> cellLayerNormWeights;
1085 bool hasOutputLayerNormWeights = false;
1086 std::vector<float> outputLayerNormWeights;
1087
1088 std::vector<float> inputValues = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
1089 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
1090 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
1091
1092 std::vector<float> expectedOutputValues = { 0.612103f, 1.56788f, 0.31966f, 1.42956f,
1093 0.909718f, 3.07916f, -0.560586f, 3.8907f,
1094 0.753671f, 1.77485f, 0.365122f, 1.60077f,
1095 0.812644f, 2.79092f, -0.605396f, 3.61742f,
1096 0.791857f, 1.64353f, 0.316588f, 1.55192f,
1097 0.807265f, 2.47012f, -0.539598f, 3.25654f };
1098
1099 tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
1100 float clippingThresCell = 10.f;
1101 float clippingThresProj = 0.f;
1102 bool isTimeMajor = false;
1103
1104 UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
1105 ::tflite::TensorType_INT8,
1106 batchSize,
1107 timeSize,
1108 inputSize,
1109 outputSize,
1110 numUnits,
1111 hasInputToInputWeights,
1112 inputToInputWeights,
1113 inputToForgetWeights,
1114 inputToCellWeights,
1115 inputToOutputWeights,
1116 hasRecurrentToInputWeights,
1117 recurrentToInputWeights,
1118 recurrentToForgetWeights,
1119 recurrentToCellWeights,
1120 recurrentToOutputWeights,
1121 hasCellToInputWeights,
1122 cellToInputWeights,
1123 hasCellToForgetWeights,
1124 cellToForgetWeights,
1125 hasCellToOutputWeights,
1126 cellToOutputWeights,
1127 hasInputGateBias,
1128 inputGateBias,
1129 forgetGateBias,
1130 cellBias,
1131 outputGateBias,
1132 hasProjectionWeights,
1133 projectionWeights,
1134 hasProjectionBias,
1135 projectionBias,
1136 hasInputLayerNormWeights,
1137 inputLayerNormWeights,
1138 hasForgetLayerNormWeights,
1139 forgetLayerNormWeights,
1140 hasCellLayerNormWeights,
1141 cellLayerNormWeights,
1142 hasOutputLayerNormWeights,
1143 outputLayerNormWeights,
1144 inputValues,
1145 expectedOutputValues,
1146 activationFunction,
1147 clippingThresCell,
1148 clippingThresProj,
1149 isTimeMajor,
1150 0.1f);
1151}
1152
1153void UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(std::vector<armnn::BackendId>& backends)
1154{
1155 int32_t batchSize = 3;
1156 int32_t timeSize = 2;
1157 int32_t inputSize = 3;
1158 int32_t outputSize = 4;
1159 // cellSize and outputSize have the same size when there is no projection.
1160 int32_t numUnits = outputSize;
1161
1162 //tensorInfo12,
1163 bool hasInputToInputWeights = false;
1164 std::vector<int8_t> inputToInputWeights;
1165
1166 std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
1167
1168 std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
1169
1170 std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
1171
1172 //tensorInfo16,
1173 bool hasRecurrentToInputWeights = false;
1174 std::vector<int8_t> recurrentToInputWeights;
1175 std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
1176
1177 std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
1178
1179 std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
1180
1181 // tensorInfo4
1182 bool hasCellToInputWeights = false;
1183 std::vector<int8_t> cellToInputWeights;
1184 bool hasCellToForgetWeights = true;
1185 std::vector<int8_t> cellToForgetWeights = { 47, -52, -24, 31 };
1186 bool hasCellToOutputWeights = true;
1187 std::vector<int8_t> cellToOutputWeights = { -17, 82, 85, -77 };
1188
1189 bool hasInputGateBias = false;
1190 std::vector<float> inputGateBias;
1191 std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
1192 std::vector<float> cellBias = { 0., 0., 0., 0. };
1193 std::vector<float> outputGateBias = { 0., 0., 0., 0. };
1194
1195 bool hasProjectionWeights = false;
1196 std::vector<int8_t> projectionWeights;
1197 bool hasProjectionBias = false;
1198 std::vector<float> projectionBias;
1199
1200 bool hasInputLayerNormWeights = false;
1201 std::vector<float> inputLayerNormWeights;
1202 bool hasForgetLayerNormWeights = false;
1203 std::vector<float> forgetLayerNormWeights;
1204 bool hasCellLayerNormWeights = false;
1205 std::vector<float> cellLayerNormWeights;
1206 bool hasOutputLayerNormWeights = false;
1207 std::vector<float> outputLayerNormWeights;
1208
1209 std::vector<float> inputValues = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
1210 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
1211 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
1212
1213 std::vector<float> expectedOutputValues = { -0.0072104f, -0.00991171f, -0.00650478f, -0.00713055f,
1214 -0.0191782f, -0.0161269f, -0.0233683f, -0.054299f,
1215 -0.00783725f, 0.00635271f, -0.0126718f, -0.022613f,
1216 -0.0161351f, -0.00775868f, -0.021054f, -0.0339778f,
1217 -0.0146392f, 0.00330261f, -0.0258733f, -0.0407797f,
1218 -0.0174297f, 0.0050105f, -0.0266275f, -0.0362564f };
1219
1220 tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
1221 float clippingThresCell = 10.f;
1222 float clippingThresProj = 0.f;
1223 bool isTimeMajor = false;
1224
1225 UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
1226 ::tflite::TensorType_INT8,
1227 batchSize,
1228 timeSize,
1229 inputSize,
1230 outputSize,
1231 numUnits,
1232 hasInputToInputWeights,
1233 inputToInputWeights,
1234 inputToForgetWeights,
1235 inputToCellWeights,
1236 inputToOutputWeights,
1237 hasRecurrentToInputWeights,
1238 recurrentToInputWeights,
1239 recurrentToForgetWeights,
1240 recurrentToCellWeights,
1241 recurrentToOutputWeights,
1242 hasCellToInputWeights,
1243 cellToInputWeights,
1244 hasCellToForgetWeights,
1245 cellToForgetWeights,
1246 hasCellToOutputWeights,
1247 cellToOutputWeights,
1248 hasInputGateBias,
1249 inputGateBias,
1250 forgetGateBias,
1251 cellBias,
1252 outputGateBias,
1253 hasProjectionWeights,
1254 projectionWeights,
1255 hasProjectionBias,
1256 projectionBias,
1257 hasInputLayerNormWeights,
1258 inputLayerNormWeights,
1259 hasForgetLayerNormWeights,
1260 forgetLayerNormWeights,
1261 hasCellLayerNormWeights,
1262 cellLayerNormWeights,
1263 hasOutputLayerNormWeights,
1264 outputLayerNormWeights,
1265 inputValues,
1266 expectedOutputValues,
1267 activationFunction,
1268 clippingThresCell,
1269 clippingThresProj,
1270 isTimeMajor,
1271                                           0.1f);
1272}
1273
1274void UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest(
1275 std::vector<armnn::BackendId>& backends)
1276{
1277 int32_t batchSize = 3;
1278 int32_t timeSize = 2;
1279 int32_t inputSize = 3;
1280 int32_t outputSize = 4;
1281 int32_t numUnits = 5;
1282
1283 bool hasInputToInputWeights = true;
1284 std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3, 2, 2, -4 };
1285
1286 std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1, -3, -2, -4 };
1287
1288 std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3, 2, 5, -4 };
1289
1290 std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4, -4, -1, -1 };
1291
1292 bool hasRecurrentToInputWeights = true;
1293 std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3,
1294 5, -1, 1, 3, -1, -1, -1, 4, 2, 3 };
1295
1296 std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3,
1297 5, -1, 1, 3, -2, -1, -1, 2, 2, 1 };
1298
1299 std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2,
1300 1, 2, 3, -2, 3, -3, -1, -5, 1, 3 };
1301
1302 std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3,
1303 -4, -1, -1, -1, 2, -1, 5, 1, -3, -4 };
1304
1305 // tensorInfo5
1306 bool hasCellToInputWeights = true;
1307 std::vector<int8_t> cellToInputWeights = { 5, 3, 8, -5, 2 };
1308 bool hasCellToForgetWeights = true;
1309 std::vector<int8_t> cellToForgetWeights = { -2, -7, 5, -3, 4 };
1310 bool hasCellToOutputWeights = true;
1311 std::vector<int8_t> cellToOutputWeights = { 9, -10 , -5, 5, 1 };
1312
1313 bool hasInputGateBias = true;
1314 std::vector<float> inputGateBias = { 0.03f, 0.15f, 0.22f, 0.38f, 0.05f };
1315 std::vector<float> forgetGateBias = { 0.1f, -0.3f, -0.2f, 0.1f, 0.4f };
1316 std::vector<float> cellBias = { -0.05f, 0.72f, 0.25f, 0.08f, 0.1f };
1317 std::vector<float> outputGateBias = { 0.05f, -0.01f, 0.2f, 0.1f, -0.2f };
1318
1319 bool hasProjectionWeights = true;
1320 std::vector<int8_t> projectionWeights = { -1, 2, 1, -2, 1, 5, 3, 8, 7, 2,
1321 -4, 2, 5, -4, 3, -2, 3, 8, -7, 2 };
1322 bool hasProjectionBias = true;
1323 std::vector<float> projectionBias(outputSize, 0.f);
1324
1325 bool hasInputLayerNormWeights = true;
1326 std::vector<float> inputLayerNormWeights = { 0.1f, 0.2f, -0.3f, -0.1f, 0.5f };
1327 bool hasForgetLayerNormWeights = true;
1328 std::vector<float> forgetLayerNormWeights = { -0.1f, 0.2f, 0.3f, 0.5f, 0.2f };
1329 bool hasCellLayerNormWeights = true;
1330 std::vector<float> cellLayerNormWeights = { 0.5f, 0.2f, 0.3f, 0.4f, -0.5f };
1331 bool hasOutputLayerNormWeights = true;
1332 std::vector<float> outputLayerNormWeights = { 0.6f, -0.2f, -0.2f, 0.5f, 0.1f };
1333
1334 std::vector<float> inputValues = { 1., 8., 3., 4., 5., 4.,
1335 3., 2., 1., 2., 3., 4.,
1336 5., 4., 3., 2., 1., 2. };
1337
1338 std::vector<float> expectedOutputValues = { 0.0471276f, 0.0168155f, 0.0789885f, 0.16550f,
1339 0.0643133f, -0.0400722f, 0.100593f, 0.197722f,
1340 0.0465562f, -0.0600682f, 0.0622087f, 0.115053f,
1341 0.056287f, -0.0566218f, 0.0856832f, 0.148484f,
1342 0.0457859f, -0.0588112f, 0.0623636f, 0.114333f,
1343 0.0509271f, -0.0754262f, 0.058600f, 0.0801288f };
1344
1345 tflite::ActivationFunctionType activationFunction = tflite::ActivationFunctionType_TANH;
1346 float clippingThresCell = 10.f;
1347 float clippingThresProj = 0.f;
1348 bool isTimeMajor = false;
1349
1350 UnidirectionalSequenceLstmTestImpl<int8_t>(backends,
1351 ::tflite::TensorType_INT8,
1352 batchSize,
1353 timeSize,
1354 inputSize,
1355 outputSize,
1356 numUnits,
1357 hasInputToInputWeights,
1358 inputToInputWeights,
1359 inputToForgetWeights,
1360 inputToCellWeights,
1361 inputToOutputWeights,
1362 hasRecurrentToInputWeights,
1363 recurrentToInputWeights,
1364 recurrentToForgetWeights,
1365 recurrentToCellWeights,
1366 recurrentToOutputWeights,
1367 hasCellToInputWeights,
1368 cellToInputWeights,
1369 hasCellToForgetWeights,
1370 cellToForgetWeights,
1371 hasCellToOutputWeights,
1372 cellToOutputWeights,
1373 hasInputGateBias,
1374 inputGateBias,
1375 forgetGateBias,
1376 cellBias,
1377 outputGateBias,
1378 hasProjectionWeights,
1379 projectionWeights,
1380 hasProjectionBias,
1381 projectionBias,
1382 hasInputLayerNormWeights,
1383 inputLayerNormWeights,
1384 hasForgetLayerNormWeights,
1385 forgetLayerNormWeights,
1386 hasCellLayerNormWeights,
1387 cellLayerNormWeights,
1388 hasOutputLayerNormWeights,
1389 outputLayerNormWeights,
1390 inputValues,
1391 expectedOutputValues,
1392 activationFunction,
1393 clippingThresCell,
1394 clippingThresProj,
1395 isTimeMajor,
1396                                           0.1f);
1397}
1398
1399TEST_SUITE("UnidirectionalSequenceLstmTest_CpuRefTests")
1400{
1401
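// Each test case below runs one of the helper configurations above against the CpuRef
// (reference) backend only; other backends could in principle be exercised by passing a
// different BackendId list, where the operator is supported.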
1402TEST_CASE ("UnidirectionalSequenceLstmTest_CpuRef_Test")
1403{
1404 std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
1405 UnidirectionalSequenceLstmTest(backends);
1406}
1407
1408TEST_CASE ("UnidirectionalSequenceLstmTimeMajorTest_CpuRef_Test")
1409{
1410 std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
1411 UnidirectionalSequenceLstmTimeMajorTest(backends);
1412}
1413
1414TEST_CASE ("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest_CpuRef_Test")
1415{
1416 std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
1417 UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionTest(backends);
1418}
1419
1420TEST_CASE ("UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest_CpuRef_Test")
1421{
1422 std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
1423 UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(backends);
1424}
1425
1426TEST_CASE ("UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest_CpuRef_Test")
1427{
1428 std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
1429 UnidirectionalSequenceLstmNoCifgWithPeepholeWithProjectionWithLayerNormTest(backends);
1430}
1431
1432TEST_CASE ("UnidirectionalSequenceLstmInt8Test_CpuRef_Test")
1433{
1434 std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
1435 UnidirectionalSequenceLstmInt8Test(backends);
1436}
1437
1438TEST_CASE ("UnidirectionalSequenceLstmInt8TimeMajorTest_CpuRef_Test")
1439{
1440 std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
1441 UnidirectionalSequenceLstmInt8TimeMajorTest(backends);
1442}
1443
1444TEST_CASE ("UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest_CpuRef_Test")
1445{
1446 std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
1447 UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionTest(backends);
1448}
1449
1450TEST_CASE ("UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest_CpuRef_Test")
1451{
1452 std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
1453 UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(backends);
1454}
1455
1456TEST_CASE ("UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest_CpuRef_Test")
1457{
1458 std::vector <armnn::BackendId> backends = {armnn::Compute::CpuRef};
1459 UnidirectionalSequenceLstmInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest(backends);
1460}
1461
1462} // End of TEST_SUITE("UnidirectionalSequenceLstmTest_CpuRefTests")
1463
1464} // namespace armnnDelegate