//
// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "UnidirectionalSequenceLstmTestImpl.hpp"

#include <armnn/utility/NumericCast.hpp>

#include <armnn/backends/TensorHandle.hpp>

#include <armnnTestUtils/TensorCopyUtils.hpp>
#include <armnnTestUtils/WorkloadTestUtils.hpp>

#include <ResolveType.hpp>

namespace {

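// Builds and runs a time-major UnidirectionalSequenceLstm workload covering a single time step,
// with hard-coded weights and biases (CIFG, peephole and projection all disabled), and returns
// the result for comparison against the caller-supplied expected output.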
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3>
UnidirectionalSequenceLstmTimeMajorSingleBatchTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& input,
    const std::vector<T>& outputExpected,
    const armnn::TensorShape& inputShape,
    const armnn::TensorShape& outputExpectedShape,
    float qScale = 0.0f,
    int32_t qOffset = 0,
    armnn::DataType constantDataType = armnn::DataType::Float32)
{
    IgnoreUnused(memoryManager);
    unsigned int batchSize = armnn::numeric_cast<unsigned int>(inputShape[1]);
    unsigned int inputSize = armnn::numeric_cast<unsigned int>(inputShape[2]);
    unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
    unsigned numUnits = outputSize;

    armnn::TensorInfo inputTensorInfo({1, batchSize, inputSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);

    armnn::TensorInfo outputTensorInfo({1, batchSize, outputSize}, ArmnnType, qScale, qOffset);

    std::vector<T> inputVector;
    inputVector.assign(input.data(), input.data() + (batchSize * inputSize));

    std::vector<T> cellStateInVector(batchSize * numUnits, T());
    std::vector<T> outputStateInVector(batchSize * outputSize, T());

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::vector<T> outputVector;
    outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
        tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
        tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::UnidirectionalSequenceLstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());

    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::TensorInfo tensorInfo4({numUnits}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo8({numUnits, 2}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset);

    std::vector<float> inputToInputWeights = { -0.45018822f, -0.02338299f, -0.0870589f,
                                               -0.34550029f, 0.04266912f, -0.15680569f,
                                               -0.34856534f, 0.43890524f };

    std::vector<float> inputToForgetWeights = { 0.09701663f, 0.20334584f, -0.50592935f,
                                                -0.31343272f, -0.40032279f, 0.44781327f,
                                                0.01387155f, -0.35593212f };

    std::vector<float> inputToCellWeights = { -0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f,
                                              -0.20583314f, 0.44344562f, 0.22077113f,
                                              -0.29909778f };

    std::vector<float> inputToOutputWeights = { -0.25065863f, -0.28290087f, 0.04613829f,
                                                0.40525138f, 0.44272184f, 0.03897077f,
                                                -0.1556896f, 0.19487578f };

    std::vector<float> recurrentToInputWeights = { -0.0063535f, -0.2042388f, 0.31454784f,
                                                   -0.35746509f, 0.28902304f, 0.08183324f,
                                                   -0.16555229f, 0.02286911f, -0.13566875f,
                                                   0.03034258f, 0.48091322f, -0.12528998f,
                                                   0.24077177f, -0.51332325f, -0.33502164f,
                                                   0.10629296f };

    std::vector<float> recurrentToForgetWeights = { -0.48684245f, -0.06655136f, 0.42224967f,
                                                    0.2112639f, 0.27654213f, 0.20864892f,
                                                    -0.07646349f, 0.45877004f, 0.00141793f,
                                                    -0.14609534f, 0.36447752f, 0.09196436f,
                                                    0.28053468f, 0.01560611f, -0.20127171f,
                                                    -0.01140004f };

    std::vector<float> recurrentToCellWeights = { -0.3407414f, 0.24443203f, -0.2078532f,
                                                  0.26320225f, 0.05695659f, -0.00123841f,
                                                  -0.4744786f, -0.35869038f, -0.06418842f,
                                                  -0.13502428f, -0.501764f, 0.22830659f,
                                                  -0.46367589f, 0.26016325f, -0.03894562f,
                                                  -0.16368064f };

    std::vector<float> recurrentToOutputWeights = { 0.43385774f, -0.17194885f, 0.2718237f,
                                                    0.09215671f, 0.24107647f, -0.39835793f,
                                                    0.18212086f, 0.01301402f, 0.48572797f,
                                                    -0.50656658f, 0.20047462f, -0.20607421f,
                                                    -0.51818722f, -0.15390486f, 0.0468148f,
                                                    0.39922136f };

    std::vector<float> cellToInputWeights = { 0., 0., 0., 0. };

    std::vector<float> inputGateBias = { 0., 0., 0., 0. };

    std::vector<float> forgetGateBias = { 1., 1., 1., 1. };

    std::vector<float> cellBias = { 0., 0., 0., 0. };

    std::vector<float> outputGateBias = { 0., 0., 0., 0. };

    armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo8);
    armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo8);
    armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo8);
    armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo8);
    armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo4);
    armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);

    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());

    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    // Flags to set test configuration
    data.m_Parameters.m_ActivationFunc = 4;
    data.m_Parameters.m_CifgEnabled = false;
    data.m_Parameters.m_PeepholeEnabled = false;
    data.m_Parameters.m_ProjectionEnabled = false;
    data.m_Parameters.m_ClippingThresCell = 10;
    data.m_Parameters.m_ClippingThresProj = 0;
    data.m_Parameters.m_TimeMajor = true;

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
    CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
    CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 3>(actualOutput,
                                 outputVector,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

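// Builds and runs a batch-major (batch, time, feature) UnidirectionalSequenceLstm workload with
// hard-coded weights and biases (CIFG, peephole and projection all disabled) and returns the
// result for comparison against the caller-supplied expected output.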
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3> UnidirectionalSequenceLstmLayerFloat32TestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& input,
    const std::vector<T>& outputExpected,
    const armnn::TensorShape& inputShape,
    const armnn::TensorShape& outputExpectedShape,
    float qScale = 0.0f,
    int32_t qOffset = 0,
    armnn::DataType constantDataType = armnn::DataType::Float32)
{
    IgnoreUnused(memoryManager);
    unsigned int batchSize = armnn::numeric_cast<unsigned int>(inputShape[0]);
    unsigned int timeSize = armnn::numeric_cast<unsigned int>(inputShape[1]);
    unsigned int inputSize = armnn::numeric_cast<unsigned int>(inputShape[2]);
    unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
    unsigned numUnits = outputSize;

    armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);

    armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, ArmnnType, qScale, qOffset);

    std::vector<T> inputVector;
    inputVector.assign(input.data(), input.data() + (batchSize * timeSize * inputSize));

    std::vector<T> cellStateInVector(batchSize * numUnits, T());
    std::vector<T> outputStateInVector(batchSize * outputSize, T());

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::vector<T> outputVector;
    outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * timeSize * outputSize));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
        tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
        tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::UnidirectionalSequenceLstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());

    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::TensorInfo tensorInfo4({numUnits}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo12({numUnits, 3}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset);

    std::vector<float> inputToInputWeights = { -0.49536117f, -0.0556083915f, -0.102400711f,
                                               -0.117484632f, 0.3298470976f, -0.1179017122f,
                                               0.214305695f, 0.42135173085f, 0.003878414626f,
                                               -0.348303917f, -0.1881275477f, 0.0343011027f };

    std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
                                                -0.3810434485f, 0.268383264f, -0.009807467424f,
                                                -0.3522925403f, -0.24275735512f, -0.28344226125f,
                                                0.13512269116f, -0.4932442977f, -0.10039821991f };

    std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
                                              0.386399507f, -0.259465157985f, -0.16545993089f,
                                              -0.4230232555f, 0.341664791103f, -0.18127849691f,
                                              -0.2277662414f, -0.55275535589f, 0.34184026718f };

    std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
                                                0.53969591851f, 0.23393625035f, -0.27140527306f,
                                                0.50009280443f, 0.07511717046f, 0.3998299249f,
                                                -0.51717478049f, 0.1889653282f, -0.367323637f };

    std::vector<float> recurrentToInputWeights = { -0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
                                                   -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
                                                   0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
                                                   0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f };

    std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
                                                    -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
                                                    -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
                                                    -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f };

    std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
                                                  -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
                                                  0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
                                                  0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f };

    std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
                                                    -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
                                                    0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
                                                    -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f };

    std::vector<float> inputGateBias = { 0., 0., 0., 0. };

    std::vector<float> forgetGateBias = { 1., 1., 1., 1. };

    std::vector<float> cellBias = { 0., 0., 0., 0. };

    std::vector<float> outputGateBias = { 0., 0., 0., 0. };

    armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo12);
    armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo12);
    armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo12);
    armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo12);
    armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);

    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());

    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    // Flags to set test configuration
    data.m_Parameters.m_ClippingThresCell = 10;
    data.m_Parameters.m_ClippingThresProj = 0;
    data.m_Parameters.m_ActivationFunc = 4;
    data.m_Parameters.m_CifgEnabled = false;
    data.m_Parameters.m_PeepholeEnabled = false;
    data.m_Parameters.m_ProjectionEnabled = false;
    data.m_Parameters.m_TimeMajor = false;

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
    CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
    CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 3>(actualOutput,
                                 outputVector,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

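// Time-major counterpart of the implementation above: input and output tensors are laid out as
// (time, batch, feature) and m_TimeMajor is set to true; the weights differ but the gate
// configuration is the same (CIFG, peephole and projection all disabled).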
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 3>
UnidirectionalSequenceLstmLayerFloat32TimeMajorTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory,
    const std::vector<T>& input,
    const std::vector<T>& outputExpected,
    const armnn::TensorShape& inputShape,
    const armnn::TensorShape& outputExpectedShape,
    float qScale = 0.0f,
    int32_t qOffset = 0,
    armnn::DataType constantDataType = armnn::DataType::Float32)
{
    IgnoreUnused(memoryManager);
    unsigned int batchSize = armnn::numeric_cast<unsigned int>(inputShape[1]);
    unsigned int timeSize = armnn::numeric_cast<unsigned int>(inputShape[0]);
    unsigned int inputSize = armnn::numeric_cast<unsigned int>(inputShape[2]);
    unsigned int outputSize = armnn::numeric_cast<unsigned int>(outputExpectedShape[2]);
    unsigned numUnits = outputSize;

    armnn::TensorInfo inputTensorInfo({timeSize, batchSize, inputSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);

    armnn::TensorInfo outputTensorInfo({timeSize, batchSize, outputSize}, ArmnnType, qScale, qOffset);

    std::vector<T> inputVector;
    inputVector.assign(input.data(), input.data() + (batchSize * timeSize * inputSize));

    std::vector<T> cellStateInVector(batchSize * numUnits, T());
    std::vector<T> outputStateInVector(batchSize * outputSize, T());

    std::vector<T> actualOutput(outputTensorInfo.GetNumElements());

    std::vector<T> outputVector;
    outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * timeSize * outputSize));

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
        tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
        tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::UnidirectionalSequenceLstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());

    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::TensorInfo tensorInfo4({numUnits}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo12({numUnits, 3}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset);

    std::vector<float> inputToInputWeights = { 0.27277296781539917f, 0.3813590407371521f, -0.394489049911499f,
                                               0.2782636880874634f, -0.3793870210647583f, -0.018918335437774658f,
                                               0.2724653482437134f, -0.19314253330230713f, -0.2947450876235962f,
                                               -0.30253493785858154f, 0.4241350293159485f, -0.22560018301010132f };

    std::vector<float> inputToForgetWeights = { -0.2667974531650543f, -0.05505800247192383f, -0.20932340621948242f,
                                                -0.14345619082450867f, 0.09666192531585693f, -0.2604355812072754f,
                                                -0.2681812047958374f, -0.3314584493637085f, 0.4485899806022644f,
                                                -0.23467743396759033f, 0.5072842240333557f, -0.4192768931388855f };

    std::vector<float> inputToCellWeights = { -0.15782442688941956f, -0.027530014514923096f, 0.4789854884147644f,
                                              0.23227906227111816f, 0.28259342908859253f, -0.030095696449279785f,
                                              0.10071521997451782f, -0.08535495400428772f, 0.18563997745513916f,
                                              -0.3049069046974182f, -0.478048175573349f, 0.025234103202819824f };

    std::vector<float> inputToOutputWeights = { -0.04584759473800659f, -0.2716066539287567f, 0.012970447540283203f,
                                                -0.4729190170764923f, -0.37422770261764526f, 0.49352723360061646f,
                                                0.3163864016532898f, -0.436781644821167f, -0.33074596524238586f,
                                                -0.32885751128196716f, -0.40959352254867554f, -0.2124689817428589f };

    std::vector<float> recurrentToInputWeights = { 0.23788475990f, -0.24948765337f, 0.50044941902f, 0.14431896805f,
                                                   -0.115940228137f, -0.717082679f, -0.17208620906f, 0.17850610617f,
                                                   -0.16702319684f, -0.11384502053f, -0.309785276245f, -0.3316611672f,
                                                   0.52380162477f, -0.06839632987f, -0.391478359627f, -0.10756178963f };

    std::vector<float> recurrentToForgetWeights = { 0.11383482068f, 0.1676601767f, -0.08550968004f, 0.03399394089f,
                                                    0.08042152225f, -0.2133381964f, 0.05182432704f, 0.38161808255f,
                                                    -0.5018365979f, -0.08043262364f, 0.07894329014f, -0.07547105155f,
                                                    0.12047368288f, 0.2986997961f, 0.0485043078f, -0.13372567296f };

    std::vector<float> recurrentToCellWeights = { 0.0433832928545f, 0.07587072294f, -0.120520234107f, 0.604576051f,
                                                  -0.434353142986f, 0.009314475068f, 0.005085289478f, 0.08488202038f,
                                                  -0.00025437487886f, 0.15245915082f, -0.1936587542f, 0.004754020f,
                                                  -0.1582719236f, 0.3307867646f, 0.0236605107784f, 0.307716339826f };

    std::vector<float> recurrentToOutputWeights = { -0.079031050201f, 0.041414566286f, -0.583727357285f, 0.1025384515f,
                                                    -0.172372072937f, 0.09214124082f, 0.178184121827f, -0.2439443916f,
                                                    0.104485116899f, 0.2600405514f, 0.064414866268f, 0.24141204357f,
                                                    0.281875759363f, -0.14234502664f, 0.15126448862f, -0.24421440064f };

    std::vector<float> inputGateBias = { 0., 0., 0., 0. };

    std::vector<float> forgetGateBias = { 1., 1., 1., 1. };

    std::vector<float> cellBias = { 0., 0., 0., 0. };

    std::vector<float> outputGateBias = { 0., 0., 0., 0. };

    armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo12);
    armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo12);
    armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo12);
    armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo12);
    armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
    armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
    armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);

    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());

    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    // Flags to set test configuration
    data.m_Parameters.m_ClippingThresCell = 10;
    data.m_Parameters.m_ClippingThresProj = 0;
    data.m_Parameters.m_ActivationFunc = 4;
    data.m_Parameters.m_CifgEnabled = false;
    data.m_Parameters.m_PeepholeEnabled = false;
    data.m_Parameters.m_ProjectionEnabled = false;
    data.m_Parameters.m_TimeMajor = true;

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
    CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
    CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<T, 3>(actualOutput,
                                 outputVector,
                                 outputHandle->GetShape(),
                                 outputTensorInfo.GetShape());
}

} // anonymous namespace

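// Time-major test with a single time step: input shape {1, 2, 2}, expected output shape {1, 2, 4}.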
LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerFloat32TimeMajorSingleBatchTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::TensorInfo inputDesc({1, 2, 2}, armnn::DataType::Float32);
    std::vector<float> input = { 2., 3., 3., 4. };

    armnn::TensorInfo outputDesc({1, 2, 4}, armnn::DataType::Float32);
    std::vector<float> expectedOutput =
        { -0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
          -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f };

    return UnidirectionalSequenceLstmTimeMajorSingleBatchTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory,
        input, expectedOutput, inputDesc.GetShape(), outputDesc.GetShape());
}

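// Batch-major test using input shape {3, 1, 3} and expected output shape {3, 1, 4}.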
LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerFloat32BatchMajorSingleBatchTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::TensorInfo inputInfo({3, 1, 3}, armnn::DataType::Float32);
    std::vector<float> input = { 1., 2., 3., 4., 5., 4., 3., 2., 1. };

    armnn::TensorInfo outputInfo({3, 1, 4}, armnn::DataType::Float32);
    std::vector<float> expectedOutput = { -0.0714901f, -0.162117f, -0.175168f, -0.0232934f,
                                          -0.0424661f, -0.231802f, -0.513374f, -0.00680323f,
                                          -0.0668735f, 0.204078f, -0.42765f, -0.0312321f };
    return UnidirectionalSequenceLstmLayerFloat32TestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory,
        input, expectedOutput, inputInfo.GetShape(), outputInfo.GetShape());
}

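// Batch-major test over three batch elements of two time steps each: input shape {3, 2, 3},
// expected output shape {3, 2, 4}.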
LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerFloat32Test(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::TensorInfo inputInfo({3, 2, 3}, armnn::DataType::Float32);
    std::vector<float> input = { 1., 2., 3., 4., 5., 4.,
                                 3., 2., 1., 2., 3., 4.,
                                 5., 4., 3., 2., 1., 2. };

    armnn::TensorInfo outputInfo({3, 2, 4}, armnn::DataType::Float32);
    std::vector<float> expectedOutput = { -0.07149004f, -0.1621171f, -0.17516759f, -0.0232934225f,
                                          -0.16810727f, -0.41412935f, -0.5498753f, -0.00803578f,
                                          -0.06687349f, 0.204077631f, -0.4276504f, -0.03123213f,
                                          -0.12000261f, -0.0941918f, -0.45639035f, -0.02870186f,
                                          -0.03429216f, 0.20824050f, -0.6569892f, -0.004152651f,
                                          -0.10493034f, 0.14210969f, -0.58347696f, -0.03297536f };
    return UnidirectionalSequenceLstmLayerFloat32TestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory,
        input, expectedOutput, inputInfo.GetShape(), outputInfo.GetShape());
}

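// Time-major counterpart of the test above: input shape {2, 3, 3} (time, batch, feature),
// expected output shape {2, 3, 4}.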
LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerFloat32TimeMajorTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    armnn::TensorInfo inputInfo({2, 3, 3}, armnn::DataType::Float32);
    std::vector<float> input = { 1., 2., 3., 4., 5., 4.,
                                 3., 2., 1., 2., 3., 4.,
                                 5., 4., 3., 2., 1., 2. };

    armnn::TensorInfo outputInfo({2, 3, 4}, armnn::DataType::Float32);
    std::vector<float> expectedOutput = { 0.135657698f, 0.124672532f, 0.0212090332f, -0.0530203655f,
                                          0.106138252f, 0.0404792242f, 0.0151643595f, -0.00675163185f,
                                          -0.0128514022f, 0.0644884035f, 0.0709072053f, -0.0454045124f,
                                          0.16288602f, 0.16649379f, 0.02770456f, -0.03698075f,
                                          0.11171641f, 0.043119f, 0.0762981f, -0.01228541f,
                                          0.10439701f, 0.21439962f, 0.11919238f, -0.08390583f };
    return UnidirectionalSequenceLstmLayerFloat32TimeMajorTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, tensorHandleFactory,
        input, expectedOutput, inputInfo.GetShape(), outputInfo.GetShape());
}

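// Batch-major test with CIFG disabled and both the peephole connections and the projection layer
// enabled (projection bias set to zero): 2 batches of 3 time steps with 4 inputs, 6 cell units and
// 5 outputs.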
LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const armnn::ITensorHandleFactory& tensorHandleFactory)
{
    IgnoreUnused(memoryManager);
    unsigned int batchSize = 2;
    unsigned int timeSize = 3;
    unsigned int outputSize = 5;
    unsigned int inputSize = 4;
    unsigned numUnits = 6;

    armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
    armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
    armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
    armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);

    const std::vector<float> inputVector = { 1., 2., 3., 4., 5., 4.,
                                             3., 2., 1., 2., 3., 4.,
                                             5., 4., 3., 2., 1., 2.,
                                             1., 2., 3., 4., 5., 4. };

    std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
    std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);

    std::vector<float> actualOutput(outputTensorInfo.GetNumElements());

    const std::vector<float> expectedOutput = { -0.0135612f, -0.0263441f, 0.0314008f, -0.00883455f, 0.00763052f,
                                                -0.00126877f, -0.0292959f, 0.0449957f, -0.00976195f, -0.00492338f,
                                                -0.0175702f, -0.0431753f, 0.0597117f, -0.0169154f, 0.0142087f,
                                                0.00472515f, -0.0196355f, 0.0342524f, -0.00407936f, -0.0253189f,
                                                -0.00512944f, -0.0293754f, 0.0512771f, -0.0151874f, -0.0246433f,
                                                -0.00744986f, -0.0345103f, 0.0450666f, -0.00944991f, 0.0127171f };

    std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
        tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
        tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);

    armnn::UnidirectionalSequenceLstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    armnn::TensorInfo tensorInfo5({outputSize}, armnn::DataType::Float32);
    armnn::TensorInfo tensorInfo6({numUnits}, armnn::DataType::Float32);
    armnn::TensorInfo tensorInfo6x4({numUnits, inputSize}, armnn::DataType::Float32);
    armnn::TensorInfo tensorInfo6x5({numUnits, outputSize}, armnn::DataType::Float32);
    armnn::TensorInfo tensorInfo5x6({outputSize, numUnits}, armnn::DataType::Float32);

    std::vector<float> inputToInputWeights = { 0.021393683f, 0.06124551f, 0.046905167f, -0.014657677f,
                                               -0.03149463f, 0.09171803f, 0.14647801f, 0.10797193f,
                                               -0.0057968358f, 0.0019193048f, -0.2726754f, 0.10154029f,
                                               -0.018539885f, 0.080349885f, -0.10262385f, -0.022599787f,
                                               -0.09121155f, -0.008675967f, -0.045206103f, -0.0821282f,
                                               -0.008045952f, 0.015478081f, 0.055217247f, 0.038719587f };

    std::vector<float> inputToForgetWeights = { -0.0018401089f, -0.004852237f, 0.03698424f, 0.014181704f,
                                                0.028273236f, -0.016726194f, -0.05249759f, -0.10204261f,
                                                0.00861066f, -0.040979505f, -0.009899187f, 0.01923892f,
                                                -0.028177269f, -0.08535103f, -0.14585495f, 0.10662567f,
                                                -0.01909731f, -0.017883534f, -0.0047269356f, -0.045103323f,
                                                0.0030784295f, 0.076784775f, 0.07463696f, 0.094531395f };

    std::vector<float> inputToCellWeights = { -0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
                                              -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
                                              -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
                                              -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
                                              -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
                                              0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f };

    std::vector<float> inputToOutputWeights = { -0.0998932f, -0.07201956f, -0.052803773f, -0.15629593f,
                                                -0.15001918f, -0.07650751f, 0.02359855f, -0.075155355f,
                                                -0.08037709f, -0.15093534f, 0.029517552f, -0.04751393f,
                                                0.010350531f, -0.02664851f, -0.016839722f, -0.023121163f,
                                                0.0077019283f, 0.012851257f, -0.05040649f, -0.0129761f,
                                                -0.021737747f, -0.038305793f, -0.06870586f, -0.01481247f };

    std::vector<float> inputGateBias = { 0.02234832f, 0.14757581f, 0.18176508f,
                                         0.10380666f, 0.053110216f, -0.06928846f };

    std::vector<float> forgetGateBias = { 0.035185695f, -0.042891346f, -0.03032477f,
                                          0.23027696f, 0.11098921f, 0.08989442f };

    std::vector<float> cellBias = { -0.024379363f, 0.0055531194f, 0.23377132f,
                                    0.033463873f, -0.1483596f, 0.029460307f };

    std::vector<float> outputGateBias = { 0.046159424f, -0.0012809046f, 0.03563469f,
                                          0.12648113f, 0.027195795f, 0.35373217f };

    std::vector<float> recurrentToInputWeights = { -0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
                                                   -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
                                                   -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
                                                   -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
                                                   0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f,
                                                   0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
                                                   -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
                                                   0.14283475f, -0.07390571f };

    std::vector<float> recurrentToCellWeights = { -0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
                                                  0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
                                                  0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
                                                  -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
                                                  0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
                                                  0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
                                                  -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
                                                  -0.019443132f, -0.030755889f };

    std::vector<float> recurrentToForgetWeights = { -0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
                                                    0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
                                                    -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
                                                    0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
                                                    0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f,
                                                    -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
                                                    -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
                                                    0.061878487f, -0.04729229f };

    std::vector<float> recurrentToOutputWeights = { 0.025825322f, -0.05813119f, 0.09495884f,
                                                    -0.045984812f, -0.01255415f, -0.0026479573f,
                                                    -0.08196161f, -0.054914974f, -0.0046604523f,
                                                    -0.029587349f, -0.044576716f, -0.07480124f,
                                                    -0.082868785f, 0.023254942f, 0.027502948f,
                                                    -0.0039728214f, -0.08683098f, -0.08116779f,
                                                    -0.014675607f, -0.037924774f, -0.023314456f,
                                                    -0.007401714f, -0.09255757f, 0.029460307f,
                                                    -0.08829125f, -0.005139627f, -0.08989442f,
                                                    -0.0555066f, 0.13596267f, 0.025062224f };

    std::vector<float> cellToInputWeights = { 0.040369894f, 0.030746894f, 0.24704495f,
                                              0.018586371f, -0.037586458f, -0.15312155f };

    std::vector<float> cellToForgetWeights = { -0.01998659f, -0.15568835f, -0.24248174f,
                                               -0.012770197f, 0.041331276f, -0.072311886f };

    std::vector<float> cellToOutputWeights = { 0.08286371f, -0.08261836f, -0.51210177f,
                                               0.002913762f, 0.17764764f, -0.5495371f };

    std::vector<float> projectionWeights = { -0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
                                             0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
                                             -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
                                             -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
                                             0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
                                             0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f };

    std::vector<float> projectionBiasVector(outputSize, 0.f); // {outputSize}

    armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo6x4);
    armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo6x4);
    armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo6x4);
    armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo6x4);
    armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo6x5);
    armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo6x5);
    armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo6x5);
    armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo6x5);
    armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo6);
    armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo6);
    armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo6);
    armnn::ScopedTensorHandle cellBiasTensor(tensorInfo6);
    armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo6);
    armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo6);
    armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo6);
    armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo5x6);
    armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo5);

    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
    AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
    AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
    AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data());

    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_CellToInputWeights = &cellToInputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;
    data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
    data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
    data.m_ProjectionWeights = &projectionWeightsTensor;
    data.m_ProjectionBias = &projectionBiasTensor;

    // Flags to set test configuration
    data.m_Parameters.m_ActivationFunc = 4;
    data.m_Parameters.m_CifgEnabled = false;
    data.m_Parameters.m_PeepholeEnabled = true;
    data.m_Parameters.m_ProjectionEnabled = true;
    data.m_Parameters.m_LayerNormEnabled = false;
    data.m_Parameters.m_TimeMajor = false;
    data.m_Parameters.m_ClippingThresCell = 10.0f;

    std::unique_ptr<armnn::IWorkload> workload
        = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
    CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
    CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());

    workload->Execute();

    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());

    return LayerTestResult<float, 3>(actualOutput,
                                     expectedOutput,
                                     outputHandle->GetShape(),
                                     outputTensorInfo.GetShape());
}

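// As above, but with layer normalization enabled in addition to peephole and projection:
// 3 batches of 2 time steps with 3 inputs, 5 cell units and 4 outputs.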
868LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTest(
869 armnn::IWorkloadFactory& workloadFactory,
870 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
871 const armnn::ITensorHandleFactory& tensorHandleFactory)
872{
873 IgnoreUnused(memoryManager);
874 unsigned int batchSize = 3;
875 unsigned int timeSize = 2;
876 unsigned int outputSize = 4;
877 unsigned int inputSize = 3;
878 unsigned numUnits = 5;
879
880 armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
881 armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32);
882 armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32);
883 armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
884
885 const std::vector<float> inputVector = { 1., 2., 3., 4., 5., 4.,
886 3., 2., 1., 2., 3., 4.,
887 5., 4., 3., 2., 1., 2. };
888
889 std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
890 std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
891
892 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
893
894 const std::vector<float> expectedOutput = { 0.0642256f, 0.0343966f, 0.184122f, 0.114717f,
895 0.11458f, 0.0407109f, 0.300327f, 0.174301f,
896 0.0864761f, 0.0362912f, 0.178635f, 0.115689f,
897 0.108008f, 0.0386623f, 0.273471f, 0.167115f,
898 0.0859545f, 0.0331481f, 0.186051f, 0.11888f,
899 0.106649f, 0.0276847f, 0.229863f, 0.166958f };
900
901 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
902 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
903 tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
904 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
905 tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
906
907 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
908
909 armnn::UnidirectionalSequenceLstmQueueDescriptor data;
910 armnn::WorkloadInfo info;
911
912 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
913 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
914 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
915
916 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
917
918 armnn::TensorInfo tensorInfo4({outputSize}, armnn::DataType::Float32);
919 armnn::TensorInfo tensorInfo5({numUnits}, armnn::DataType::Float32);
920 armnn::TensorInfo tensorInfo5x3({numUnits, inputSize}, armnn::DataType::Float32);
921 armnn::TensorInfo tensorInfo5x4({numUnits, outputSize}, armnn::DataType::Float32);
922 armnn::TensorInfo tensorInfo4x5({outputSize, numUnits}, armnn::DataType::Float32);
923
924 std::vector<float> inputToInputWeights = { -0.49536117f, -0.0556083915f, -0.102400711f,
925 -0.117484632f, 0.3298470976f, -0.1179017122f,
926 0.214305695f, 0.42135173085f, 0.003878414626f,
927 -0.348303917f, -0.1881275477f, 0.0343011027f,
928 -0.38837709614f, -0.05636804124f, 0.4259087456f};
929
930 std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
931 -0.3810434485f, 0.268383264f, -0.009807467424f,
932 -0.3522925403f, -0.24275735512f, -0.28344226125f,
933 0.13512269116f, -0.4932442977f, -0.10039821991f,
934 0.2726137042f, 0.09216640889f, -0.06551410215f};
935
936 std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
937 0.386399507f, -0.259465157985f, -0.16545993089f,
938 -0.4230232555f, 0.341664791103f, -0.18127849691f,
939 -0.2277662414f, -0.55275535589f, 0.34184026718f,
940 0.3954237699f, -0.19407111404f, 0.30412107706f};
941
942 std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
943 0.53969591851f, 0.23393625035f, -0.27140527306f,
944 0.50009280443f, 0.07511717046f, 0.3998299249f,
945 -0.51717478049f, 0.1889653282f, -0.367323637f,
946 -0.12584099173f, -0.12319286912f, 0.2407919466f};
947
948 std::vector<float> inputGateBias{ 0.03f, 0.15f, 0.22f, 0.38f, 0.05f };
949 std::vector<float> forgetGateBias{ 0.1f, -0.3f, -0.2f, 0.1f, 0.4f };
950 std::vector<float> cellBias{ -0.05f, 0.72f, 0.25f, 0.08f, 0.1f };
951 std::vector<float> outputGateBias{ 0.05f, -0.01f, 0.2f, 0.1f, -0.2f };
952
953 std::vector<float> recurrentToInputWeights = { -0.128009796112f, 0.1995525098f, -0.07745539397f, 0.1558421701f,
954 -0.265254765766f, -0.38837709614f, -0.05636804124f, 0.4259087456f,
955 0.17628988623f, 0.3877420127f, 0.53300309181f, -0.0959980934f,
956 0.00302857416f, 0.3266998827f, -0.142509296562f, -0.04433270756f,
957 0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f };
958
959 std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
960 -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
961 -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
962 -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f,
963 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f };
964
965 std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
966 -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
967 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
968 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f,
969 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f };
970
971 std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
972 -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
973 0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
974 -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f,
975 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f };
976
977 std::vector<float> cellToInputWeights { 0.05f, 0.1f, 0.25f, 0.15f, -0.02f };
978 std::vector<float> cellToForgetWeights { -0.02f, -0.15f, -0.25f, -0.03f, 0.15f };
979 std::vector<float> cellToOutputWeights { 0.1f, -0.1f, -0.5f, 0.05f, 0.01f };
980
981 std::vector<float> projectionWeights{ -0.1f, 0.2f, 0.01f, -0.2f,
982 0.1f, 0.5f, 0.3f, 0.08f,
983 0.07f, 0.2f, -0.4f, 0.2f,
984 0.5f, -0.4f, 0.3f, -0.2f,
985 0.3f, 0.08f, -0.07f, 0.2f};
986
987 std::vector<float> projectionBiasVector(outputSize, 0.f); //{outputSize}
988
989 std::vector<float> inputLayerNormWeights{ 0.1f, 0.2f, 0.3f, 0.5f, 0.8f };
990 std::vector<float> forgetLayerNormWeights{ 0.1f, 0.2f, 0.3f, 0.5f, 0.2f };
991 std::vector<float> cellLayerNormWeights{ 0.7f, 0.2f, 0.3f, 0.8f, 0.5f };
992 std::vector<float> outputLayerNormWeights{ 0.6f, 0.2f, 0.2f, 0.5f, 0.1f };
993
994 armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfo5x3);
995 armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo5x3);
996 armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo5x3);
997 armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo5x3);
998 armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo5x4);
999 armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfo5x4);
1000 armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo5x4);
1001 armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo5x4);
1002 armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfo5);
1003 armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfo5);
1004 armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo5);
1005 armnn::ScopedTensorHandle cellBiasTensor(tensorInfo5);
1006 armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo5);
1007 armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo5);
1008 armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo5);
1009 armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfo4x5);
1010 armnn::ScopedTensorHandle projectionBiasTensor(tensorInfo4);
1011
1012 armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfo5);
1013 armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfo5);
1014 armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfo5);
1015 armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfo5);
1016
1017 AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
1018 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
1019 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
1020 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
1021 AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
1022 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
1023 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
1024 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
1025 AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
1026 AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
1027 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
1028 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
1029 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
1030 AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
1031 AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
1032 AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
1033 AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data());
1034
1035 AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, inputLayerNormWeights.data());
1036 AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data());
1037 AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data());
1038 AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data());
1039
1040 data.m_InputToInputWeights = &inputToInputWeightsTensor;
1041 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1042 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1043 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1044 data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1045 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1046 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1047 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1048 data.m_CellToInputWeights = &cellToInputWeightsTensor;
1049 data.m_InputGateBias = &inputGateBiasTensor;
1050 data.m_ForgetGateBias = &forgetGateBiasTensor;
1051 data.m_CellBias = &cellBiasTensor;
1052 data.m_OutputGateBias = &outputGateBiasTensor;
1053 data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1054 data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1055 data.m_ProjectionWeights = &projectionWeightsTensor;
1056 data.m_ProjectionBias = &projectionBiasTensor;
1057
1058 data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
1059 data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
1060 data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
1061 data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;
1062
1063 // Flags to set test configuration
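// (m_ActivationFunc = 4 selects the tanh activation in ArmNN's LSTM descriptor convention.)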
1064 data.m_Parameters.m_ActivationFunc = 4;
1065 data.m_Parameters.m_CifgEnabled = false;
1066 data.m_Parameters.m_PeepholeEnabled = true;
1067 data.m_Parameters.m_ProjectionEnabled = true;
1068 data.m_Parameters.m_LayerNormEnabled = true;
1069 data.m_Parameters.m_TimeMajor = false;
1070 data.m_Parameters.m_ClippingThresCell = 10.0f;
1071
1072 std::unique_ptr<armnn::IWorkload> workload
1073 = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
1074 inputHandle->Allocate();
1075 outputStateInHandle->Allocate();
1076 cellStateInHandle->Allocate();
1077 outputHandle->Allocate();
1078
1079 CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
1080 CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
1081 CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
1082
1083 workload->Execute();
1084
1085 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1086
1087 return LayerTestResult<float, 3>(actualOutput,
1088 expectedOutput,
1089 outputHandle->GetShape(),
1090 outputTensorInfo.GetShape());
1091}
1092
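// Tests a Float32 UnidirectionalSequenceLstm workload with CIFG and peephole connections
// enabled and projection disabled, on batch-major input of shape
// [batchSize = 3, timeSize = 2, inputSize = 3] producing output of shape [3, 2, outputSize = 4].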
1093LayerTestResult<float, 3> UnidirectionalSequenceLstmWithCifgWithPeepholeNoProjectionTest(
1094 armnn::IWorkloadFactory& workloadFactory,
1095 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1096 const armnn::ITensorHandleFactory& tensorHandleFactory)
1097{
1098 IgnoreUnused(memoryManager);
1099 unsigned int batchSize = 3;
1100 unsigned int timeSize = 2;
1101 unsigned int inputSize = 3;
1102 unsigned int outputSize = 4;
1103 unsigned numUnits = outputSize;
1104
1105 armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
1106 armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
1107 armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
1108
1109 armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
1110
1111 std::vector<float> inputVector = { 1., 2., 3., 4., 5., 4.,
1112 3., 2., 1., 2., 3., 4.,
1113 5., 4., 3., 2., 1., 2. };
1114
1115 std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1116 std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1117
1118 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
1119
1120 std::vector<float> outputVector = { -0.0129257f, -0.070531f, -0.153508f, -0.0392391f,
1121 -0.0300169f, -0.195717f, -0.528679f, -0.0818106f,
1122 -0.0332748f, 0.155429f, -0.353966f, -0.0801505f,
1123 -0.032312f, -0.0407911f, -0.435053f, -0.0932317f,
1124 -0.0108233f, 0.165584f, -0.640424f, -0.0447535f,
1125 -0.031675f, 0.125987f, -0.526695f, -0.110093f };
1126
1127 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1128 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1129 tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
1130 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1131 tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
1132
1133 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1134
1135 armnn::UnidirectionalSequenceLstmQueueDescriptor data;
1136 armnn::WorkloadInfo info;
1137
1138 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1139 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1140 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1141
1142 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1143
1144 armnn::TensorInfo tensorInfo4({numUnits}, armnn::DataType::Float32);
1145 armnn::TensorInfo tensorInfo12({numUnits, 3}, armnn::DataType::Float32);
1146 armnn::TensorInfo tensorInfo16({numUnits, 4}, armnn::DataType::Float32);
1147
1148 std::vector<float> inputToForgetWeights = { 0.2415594226f, 0.15400093799f, 0.4566498398f,
1149 -0.3810434485f, 0.268383264f, -0.009807467424f,
1150 -0.3522925403f, -0.24275735512f, -0.28344226125f,
1151 0.13512269116f, -0.4932442977f, -0.10039821991f };
1152
1153 std::vector<float> inputToCellWeights = { -0.2504855627f, 0.184490025045f, -0.2480507493f,
1154 0.386399507f, -0.259465157985f, -0.16545993089f,
1155 -0.4230232555f, 0.341664791103f, -0.18127849691f,
1156 -0.2277662414f, -0.55275535589f, 0.34184026718f };
1157
1158 std::vector<float> inputToOutputWeights = { 0.2303854227f, 0.5218806862f, -0.4865379333f,
1159 0.53969591851f, 0.23393625035f, -0.27140527306f,
1160 0.50009280443f, 0.07511717046f, 0.3998299249f,
1161 -0.51717478049f, 0.1889653282f, -0.367323637f };
1162
1163 std::vector<float> recurrentToForgetWeights = { -0.09499983487f, -0.08814888417f, -0.04834804721f, 0.1516668247f,
1164 -0.3967529535f, -0.06463699788f, 0.4952811002f, 0.003274492938f,
1165 -0.0968840941f, 0.17928104102f, 0.0031281141592f, -0.3387276584f,
1166 -0.3587934076f, 0.06705895066f, 0.22463923692f, 0.1961955726f };
1167
1168 std::vector<float> recurrentToCellWeights = { -0.21938985582f, -0.3023648226f, -0.1170005202f, -0.3509177422f,
1169 -0.4286288613f, 0.2726137042f, 0.09216640889f, -0.06551410215f,
1170 0.20453298098f, 0.2393476665f, 0.11846517771f, 0.2630801796f,
1171 0.3954237699f, -0.19407111404f, 0.30412107706f, -0.27342408554f };
1172
1173 std::vector<float> recurrentToOutputWeights = { -0.32921677827f, 0.32624614238f, -0.1388191282f, -0.17879831790f,
1174 -0.15185534954f, -0.16918526583f, -0.10087361183f, -0.5436913968f,
1175 0.016758225858f, 0.30454617738f, -0.41493862867f, -0.005565764375f,
1176 -0.12584099173f, -0.12319286912f, 0.2407919466f, -0.08879069983f };
1177
1178 std::vector<float> cellToForgetWeights{ 0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f };
1179
1180 std::vector<float> cellToOutputWeights{ -0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f };
1181
1182 std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
1183
1184 std::vector<float> cellBias = { 0., 0., 0., 0. };
1185
1186 std::vector<float> outputGateBias = { 0., 0., 0., 0. };
1187
1188 armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfo12);
1189 armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfo12);
1190 armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfo12);
1191 armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
1192 armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
1193 armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
1194 armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfo4);
1195 armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfo4);
1196 armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfo4);
1197 armnn::ScopedTensorHandle cellBiasTensor(tensorInfo4);
1198 armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfo4);
1199
1200 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
1201 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
1202 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
1203 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
1204 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
1205 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
1206 AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
1207 AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
1208 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
1209 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
1210 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
1211
1212 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1213 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1214 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1215 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1216 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1217 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1218 data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1219 data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1220 data.m_ForgetGateBias = &forgetGateBiasTensor;
1221 data.m_CellBias = &cellBiasTensor;
1222 data.m_OutputGateBias = &outputGateBiasTensor;
1223
1224 // Flags to set test configuration
1225 data.m_Parameters.m_ClippingThresCell = 10;
1226 data.m_Parameters.m_ClippingThresProj = 0;
1227 data.m_Parameters.m_ActivationFunc = 4;
1228 data.m_Parameters.m_CifgEnabled = true;
1229 data.m_Parameters.m_PeepholeEnabled = true;
1230 data.m_Parameters.m_ProjectionEnabled = false;
1231 data.m_Parameters.m_TimeMajor = false;
1232
1233 std::unique_ptr<armnn::IWorkload> workload
1234 = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
1235 inputHandle->Allocate();
1236 outputStateInHandle->Allocate();
1237 cellStateInHandle->Allocate();
1238
1239 outputHandle->Allocate();
1240
1241 CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
1242 CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
1243 CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
1244
1245 workload->Execute();
1246
1247 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1248
1249 return LayerTestResult<float, 3>(actualOutput,
1250 outputVector,
1251 outputHandle->GetShape(),
1252 outputTensorInfo.GetShape());
1253}
1254
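// Tests a UnidirectionalSequenceLstm workload whose weights are quantized to QAsymmS8
// (scale 0.1, zero point 0) while the inputs, outputs and biases remain Float32.
// CIFG, peephole and projection are all disabled and the input is batch-major.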
1255LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerInt8Test(
1256 armnn::IWorkloadFactory& workloadFactory,
1257 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1258 const armnn::ITensorHandleFactory& tensorHandleFactory)
1259{
1260 IgnoreUnused(memoryManager);
1261 unsigned int batchSize = 3;
1262 unsigned int timeSize = 2;
1263 unsigned int inputSize = 3;
1264 unsigned int outputSize = 4;
1265 unsigned numUnits = outputSize;
1266
1267 armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
1268 armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
1269 armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
1270
1271 armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
1272
1273 const std::vector<float> inputVector = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
1274 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
1275 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
1276
1277 std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1278 std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1279
1280 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
1281
1282 const std::vector<float> outputVector = { -0.0142517f, -0.0198845f, -0.0120569f, -0.0116868f,
1283 -0.0350714f, -0.0343202f, -0.047504f, -0.0569789f,
1284 -0.0146346f, 0.0106663f, -0.0247238f, -0.0319502f,
1285 -0.0294759f, -0.0129935f, -0.0444175f, -0.0444354f,
1286 -0.0280855f, 0.00545101f, -0.051422f, -0.0463838f,
1287 -0.0310702f, 0.00915739f, -0.0625207f, -0.0482648f };
1288
1289 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1290 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1291 tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
1292 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1293 tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
1294
1295 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1296
1297 armnn::UnidirectionalSequenceLstmQueueDescriptor data;
1298 armnn::WorkloadInfo info;
1299
1300 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1301 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1302 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1303
1304 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1305
1306 armnn::TensorInfo tensorInfoNumFp({numUnits}, armnn::DataType::Float32);
1307 armnn::TensorInfo tensorInfoNumInput({numUnits, inputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1308 armnn::TensorInfo tensorInfoNumOutput({numUnits, outputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1309
1310 std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
1311 std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
1312 std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
1313 std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
1314
1315 std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
1316 std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
1317 std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
1318 std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
1319
1320 std::vector<float> inputGateBias = { 0., 0., 0., 0. };
1321 std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
1322 std::vector<float> cellBias = { 0., 0., 0., 0. };
1323 std::vector<float> outputGateBias = { 0., 0., 0., 0. };
1324
1325 armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfoNumInput);
1326 armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoNumInput);
1327 armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoNumInput);
1328 armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoNumInput);
1329 armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfoNumOutput);
1330 armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoNumOutput);
1331 armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoNumOutput);
1332 armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoNumOutput);
1333 armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfoNumFp);
1334 armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumFp);
1335 armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumFp);
1336 armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumFp);
1337
1338 AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
1339 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
1340 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
1341 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
1342 AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
1343 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
1344 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
1345 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
1346 AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
1347 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
1348 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
1349 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
1350
1351 data.m_InputToInputWeights = &inputToInputWeightsTensor;
1352 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1353 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1354 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1355 data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1356 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1357 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1358 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1359 data.m_InputGateBias = &inputGateBiasTensor;
1360 data.m_ForgetGateBias = &forgetGateBiasTensor;
1361 data.m_CellBias = &cellBiasTensor;
1362 data.m_OutputGateBias = &outputGateBiasTensor;
1363
1364 // Flags to set test configuration
1365 data.m_Parameters.m_ClippingThresCell = 10;
1366 data.m_Parameters.m_ClippingThresProj = 0;
1367 data.m_Parameters.m_ActivationFunc = 4;
1368 data.m_Parameters.m_CifgEnabled = false;
1369 data.m_Parameters.m_PeepholeEnabled = false;
1370 data.m_Parameters.m_ProjectionEnabled = false;
1371 data.m_Parameters.m_TimeMajor = false;
1372
1373 std::unique_ptr<armnn::IWorkload> workload
1374 = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
1375 inputHandle->Allocate();
1376 outputStateInHandle->Allocate();
1377 cellStateInHandle->Allocate();
1378
1379 outputHandle->Allocate();
1380
1381 CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
1382 CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
1383 CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
1384
1385 workload->Execute();
1386
1387 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1388
1389 return LayerTestResult<float, 3>(actualOutput,
1390 outputVector,
1391 outputHandle->GetShape(),
1392 outputTensorInfo.GetShape());
1393}
1394
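// Same QAsymmS8-weight configuration as the test above, but with m_TimeMajor = true,
// so the input and output tensors use the [timeSize, batchSize, ...] layout.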
1395LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerInt8TimeMajorTest(
1396 armnn::IWorkloadFactory& workloadFactory,
1397 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1398 const armnn::ITensorHandleFactory& tensorHandleFactory)
1399{
1400 IgnoreUnused(memoryManager);
1401 unsigned int batchSize = 3;
1402 unsigned int timeSize = 2;
1403 unsigned int inputSize = 3;
1404 unsigned int outputSize = 4;
1405 unsigned numUnits = outputSize;
1406
1407 armnn::TensorInfo inputTensorInfo({timeSize, batchSize, inputSize}, armnn::DataType::Float32);
1408 armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
1409 armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
1410
1411 armnn::TensorInfo outputTensorInfo({timeSize, batchSize, outputSize}, armnn::DataType::Float32);
1412
1413 const std::vector<float> inputVector = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
1414 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
1415 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
1416
1417 std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1418 std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1419
1420 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
1421
1422 const std::vector<float> outputVector = { -0.0142517f, -0.0198845f, -0.0120122f, -0.0116868f,
1423 -0.0261295f, -0.0188487f, -0.0345463f, -0.049733f,
1424 -0.0146346f, 0.0106663f, -0.0247238f, -0.0319502f,
1425 -0.0291863f, -0.0369402f, -0.0354071f, -0.0296529f,
1426 -0.0419539f, -0.00617731f, -0.0814796f, -0.0804005f,
1427 -0.0244737f, 0.0119905f, -0.0457527f, -0.0331862f };
1428 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1429 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1430 tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
1431 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1432 tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
1433
1434 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1435
1436 armnn::UnidirectionalSequenceLstmQueueDescriptor data;
1437 armnn::WorkloadInfo info;
1438
1439 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1440 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1441 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1442
1443 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1444
1445 armnn::TensorInfo tensorInfoNumFp({numUnits}, armnn::DataType::Float32);
1446 armnn::TensorInfo tensorInfoNumInput({numUnits, inputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1447 armnn::TensorInfo tensorInfoNumOutput({numUnits, outputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1448
1449 std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
1450 std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
1451 std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
1452 std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
1453
1454 std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
1455 std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
1456 std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
1457 std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
1458
1459
1460 std::vector<float> inputGateBias = { 0., 0., 0., 0. };
1461 std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
1462 std::vector<float> cellBias = { 0., 0., 0., 0. };
1463 std::vector<float> outputGateBias = { 0., 0., 0., 0. };
1464
1465 armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfoNumInput);
1466 armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoNumInput);
1467 armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoNumInput);
1468 armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoNumInput);
1469 armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfoNumOutput);
1470 armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoNumOutput);
1471 armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoNumOutput);
1472 armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoNumOutput);
1473 armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfoNumFp);
1474 armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumFp);
1475 armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumFp);
1476 armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumFp);
1477
1478 AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
1479 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
1480 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
1481 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
1482 AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
1483 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
1484 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
1485 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
1486 AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
1487 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
1488 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
1489 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
1490
1491 data.m_InputToInputWeights = &inputToInputWeightsTensor;
1492 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1493 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1494 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1495 data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1496 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1497 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1498 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1499 data.m_InputGateBias = &inputGateBiasTensor;
1500 data.m_ForgetGateBias = &forgetGateBiasTensor;
1501 data.m_CellBias = &cellBiasTensor;
1502 data.m_OutputGateBias = &outputGateBiasTensor;
1503
1504 // Flags to set test configuration
1505 data.m_Parameters.m_ClippingThresCell = 10;
1506 data.m_Parameters.m_ClippingThresProj = 0;
1507 data.m_Parameters.m_ActivationFunc = 4;
1508 data.m_Parameters.m_CifgEnabled = false;
1509 data.m_Parameters.m_PeepholeEnabled = false;
1510 data.m_Parameters.m_ProjectionEnabled = false;
1511 data.m_Parameters.m_TimeMajor = true;
1512
1513 std::unique_ptr<armnn::IWorkload> workload
1514 = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
1515 inputHandle->Allocate();
1516 outputStateInHandle->Allocate();
1517 cellStateInHandle->Allocate();
1518
1519 outputHandle->Allocate();
1520
1521 CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
1522 CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
1523 CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
1524
1525 workload->Execute();
1526
1527 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1528
1529 return LayerTestResult<float, 3>(actualOutput,
1530 outputVector,
1531 outputHandle->GetShape(),
1532 outputTensorInfo.GetShape());
1533}
1534
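// Tests QAsymmS8 weights with CIFG disabled and both the peephole connections and the
// projection layer enabled (the projection bias is zero-initialised), batch-major layout.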
1535LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerInt8NoCifgWithPeepholeWithProjectionTest(
1536 armnn::IWorkloadFactory& workloadFactory,
1537 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1538 const armnn::ITensorHandleFactory& tensorHandleFactory)
1539{
1540 IgnoreUnused(memoryManager);
1541 unsigned int batchSize = 3;
1542 unsigned int timeSize = 2;
1543 unsigned int outputSize = 4;
1544 unsigned int inputSize = 3;
1545 unsigned numUnits = 4;
1546
1547 armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
1548 armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32);
1549 armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32);
1550 armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
1551
1552 const std::vector<float> inputVector = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
1553 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
1554 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
1555
1556 std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1557 std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1558
1559 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
1560
1561 const std::vector<float> expectedOutput = { 0.612103f, 1.56788f, 0.31966f, 1.42956f,
1562 0.909718f, 3.07916f, -0.560586f, 3.8907f,
1563 0.753671f, 1.77485f, 0.365122f, 1.60077f,
1564 0.812644f, 2.79092f, -0.605396f, 3.61742f,
1565 0.791857f, 1.64353f, 0.316588f, 1.55192f,
1566 0.807265f, 2.47012f, -0.539598f, 3.25654f };
1567
1568 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1569 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1570 tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
1571 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1572 tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
1573 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1574
1575 armnn::UnidirectionalSequenceLstmQueueDescriptor data;
1576 armnn::WorkloadInfo info;
1577
1578 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1579 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1580 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1581 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1582
1583 armnn::TensorInfo tensorInfoOut({outputSize}, armnn::DataType::Float32);
1584 armnn::TensorInfo tensorInfoNumFp({numUnits}, armnn::DataType::Float32);
1585 armnn::TensorInfo tensorInfoNum({numUnits}, armnn::DataType::QAsymmS8, 0.1f, 0);
1586 armnn::TensorInfo tensorInfoNumInput({numUnits, inputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1587 armnn::TensorInfo tensorInfoNumOutput({numUnits, outputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1588 armnn::TensorInfo tensorInfoOutNum({outputSize, numUnits}, armnn::DataType::QAsymmS8, 0.1f, 0);
1589
1590 std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3 };
1591 std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
1592 std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
1593 std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
1594
1595 std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -1, -1 };
1596 std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
1597 std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
1598 std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
1599
1600 std::vector<float> inputGateBias = { 0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f};
1601 std::vector<float> forgetGateBias = { 0.035185695f, -0.042891346f, -0.3032477f, 0.23027696f};
1602 std::vector<float> cellBias = { -0.124379363f, 0.55531194f, 0.23377132f, 0.033463873f };
1603 std::vector<float> outputGateBias = { 0.046159424f, -0.12809046f, 0.03563469f, 0.12648113f };
1604
1605 std::vector<int8_t> cellToInputWeights = { 5, 10, 25, 15 };
1606 std::vector<int8_t> cellToForgetWeights = { -5, 15, 25, 3 };
1607 std::vector<int8_t> cellToOutputWeights = { 10, -10, -5, 50 };
1608
1609 std::vector<int8_t> projectionWeights = { -25, 51, 3, -5, 25, 127, 77, 20, 18, 51, -10, 51, -25, 88, 77, -13 };
1610
1611 std::vector<float> projectionBiasVector(outputSize, 0.f); //{outputSize}
1612
1613 armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfoNumInput);
1614 armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoNumInput);
1615 armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoNumInput);
1616 armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoNumInput);
1617 armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoNumOutput);
1618 armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfoNumOutput);
1619 armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoNumOutput);
1620 armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoNumOutput);
1621 armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfoNum);
1622 armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfoNumFp);
1623 armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumFp);
1624 armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumFp);
1625 armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumFp);
1626 armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNum);
1627 armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNum);
1628 armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfoOutNum);
1629 armnn::ScopedTensorHandle projectionBiasTensor(tensorInfoOut);
1630
1631 AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
1632 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
1633 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
1634 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
1635 AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
1636 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
1637 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
1638 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
1639 AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
1640 AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
1641 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
1642 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
1643 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
1644 AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
1645 AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
1646 AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
1647 AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data());
1648
1649 data.m_InputToInputWeights = &inputToInputWeightsTensor;
1650 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1651 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1652 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1653 data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1654 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1655 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1656 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1657 data.m_CellToInputWeights = &cellToInputWeightsTensor;
1658 data.m_InputGateBias = &inputGateBiasTensor;
1659 data.m_ForgetGateBias = &forgetGateBiasTensor;
1660 data.m_CellBias = &cellBiasTensor;
1661 data.m_OutputGateBias = &outputGateBiasTensor;
1662 data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1663 data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1664 data.m_ProjectionWeights = &projectionWeightsTensor;
1665 data.m_ProjectionBias = &projectionBiasTensor;
1666
1667 // Flags to set test configuration
1668 data.m_Parameters.m_ActivationFunc = 4;
1669 data.m_Parameters.m_CifgEnabled = false;
1670 data.m_Parameters.m_PeepholeEnabled = true;
1671 data.m_Parameters.m_ProjectionEnabled = true;
1672 data.m_Parameters.m_LayerNormEnabled = false;
1673 data.m_Parameters.m_TimeMajor = false;
1674 data.m_Parameters.m_ClippingThresCell = 10.0f;
1675
1676
1677 std::unique_ptr<armnn::IWorkload> workload
1678 = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
1679 inputHandle->Allocate();
1680 outputStateInHandle->Allocate();
1681 cellStateInHandle->Allocate();
1682 outputHandle->Allocate();
1683
1684 CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
1685 CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
1686 CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
1687
1688 workload->Execute();
1689
1690 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1691
1692 return LayerTestResult<float, 3>(actualOutput,
1693 expectedOutput,
1694 outputHandle->GetShape(),
1695 outputTensorInfo.GetShape());
1696}
1697
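// As above, but with layer normalization enabled as well; here numUnits = 5 differs from
// outputSize = 4, so the projection weights map the five cell units down to four outputs.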
1698LayerTestResult<float, 3> UnidirectionalSequenceLstmLayerInt8NoCifgWithPeepholeWithProjectionWithLayerNormTest(
1699 armnn::IWorkloadFactory& workloadFactory,
1700 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1701 const armnn::ITensorHandleFactory& tensorHandleFactory)
1702{
1703 IgnoreUnused(memoryManager);
1704 unsigned int batchSize = 3;
1705 unsigned int timeSize = 2;
1706 unsigned int outputSize = 4;
1707 unsigned int inputSize = 3;
1708 unsigned numUnits = 5;
1709
1710 armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
1711 armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, armnn::DataType::Float32);
1712 armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, armnn::DataType::Float32);
1713 armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
1714
1715 const std::vector<float> inputVector = { 1., 8., 3., 4., 5., 4.,
1716 3., 2., 1., 2., 3., 4.,
1717 5., 4., 3., 2., 1., 2. };
1718
1719 std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1720 std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1721
1722 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
1723
1724 const std::vector<float> expectedOutput = { 0.0471276f, 0.0168155f, 0.0789885f, 0.16550f,
1725 0.0643133f, -0.0400722f, 0.100593f, 0.197722f,
1726 0.0465562f, -0.0600682f, 0.0622087f, 0.115053f,
1727 0.056287f, -0.0566218f, 0.0856832f, 0.148484f,
1728 0.0457859f, -0.0588112f, 0.0623636f, 0.114333f,
1729 0.0509271f, -0.0754262f, 0.058600f, 0.0801288f };
1730
1731 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1732 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1733 tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
1734 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1735 tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
1736
1737 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1738
1739 armnn::UnidirectionalSequenceLstmQueueDescriptor data;
1740 armnn::WorkloadInfo info;
1741
1742 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1743 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1744 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1745
1746 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1747
1748 armnn::TensorInfo tensorInfoOut({outputSize}, armnn::DataType::Float32);
1749 armnn::TensorInfo tensorInfoNumFp({numUnits}, armnn::DataType::Float32);
1750 armnn::TensorInfo tensorInfoNum({numUnits}, armnn::DataType::QAsymmS8, 0.1f, 0);
1751 armnn::TensorInfo tensorInfoNumInput({numUnits, inputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1752 armnn::TensorInfo tensorInfoNumOutput({numUnits, outputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1753 armnn::TensorInfo tensorInfoOutNum({outputSize, numUnits}, armnn::DataType::QAsymmS8, 0.1f, 0);
1754
1755 std::vector<int8_t> inputToInputWeights = { -4, -1, -1, -2, 3, -2, 2, 4, 1, -4, -2, 3, 2, 2, -4 };
1756 std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1, -3, -2, -4 };
1757 std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3, 2, 5, -4 };
1758 std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4, -4, -1, -1 };
1759
1760 std::vector<float> inputGateBias = { 0.03f, 0.15f, 0.22f, 0.38f, 0.05f };
1761 std::vector<float> forgetGateBias = { 0.1f, -0.3f, -0.2f, 0.1f, 0.4f };
1762 std::vector<float> cellBias = { -0.05f, 0.72f, 0.25f, 0.08f, 0.1f };
1763 std::vector<float> outputGateBias = { 0.05f, -0.01f, 0.2f, 0.1f, -0.2f };
1764
1765 std::vector<int8_t> recurrentToInputWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3,
1766 5, -1, 1, 3, -1, -1, -1, 4, 2, 3 };
1767
1768 std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3,
1769 5, -1, 1, 3, -2, -1, -1, 2, 2, 1 };
1770
1771 std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2,
1772 1, 2, 3, -2, 3, -3, -1, -5, 1, 3 };
1773
1774 std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3,
1775 -4, -1, -1, -1, 2, -1, 5, 1, -3, -4 };
1776
1777 std::vector<int8_t> cellToInputWeights = { 5, 3, 8, -5, 2 };
1778 std::vector<int8_t> cellToForgetWeights = { -2, -7, 5, -3, 4 };
1779 std::vector<int8_t> cellToOutputWeights = { 9, -10, -5, 5, 1 };
1780
1781 std::vector<int8_t> projectionWeights = { -1, 2, 1, -2, 1, 5, 3, 8, 7, 2,
1782 -4, 2, 5, -4, 3, -2, 3, 8, -7, 2 };
1783
1784 std::vector<float> projectionBiasVector(outputSize, 0.f); //{outputSize}
1785
1786 std::vector<float> inputLayerNormWeights = { 0.1f, 0.2f, -0.3f, -0.1f, 0.5f };
1787 std::vector<float> forgetLayerNormWeights = { -0.1f, 0.2f, 0.3f, 0.5f, 0.2f };
1788 std::vector<float> cellLayerNormWeights = { 0.5f, 0.2f, 0.3f, 0.4f, -0.5f };
1789 std::vector<float> outputLayerNormWeights = { 0.6f, -0.2f, -0.2f, 0.5f, 0.1f };
1790
1791 armnn::ScopedTensorHandle inputToInputWeightsTensor(tensorInfoNumInput);
1792 armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoNumInput);
1793 armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoNumInput);
1794 armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoNumInput);
1795 armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoNumOutput);
1796 armnn::ScopedTensorHandle recurrentToInputWeightsTensor(tensorInfoNumOutput);
1797 armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoNumOutput);
1798 armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoNumOutput);
1799 armnn::ScopedTensorHandle cellToInputWeightsTensor(tensorInfoNum);
1800 armnn::ScopedTensorHandle inputGateBiasTensor(tensorInfoNumFp);
1801 armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumFp);
1802 armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumFp);
1803 armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumFp);
1804 armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNum);
1805 armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNum);
1806 armnn::ScopedTensorHandle projectionWeightsTensor(tensorInfoOutNum);
1807 armnn::ScopedTensorHandle projectionBiasTensor(tensorInfoOut);
1808
1809 armnn::ScopedTensorHandle inputLayerNormWeightsTensor(tensorInfoNumFp);
1810 armnn::ScopedTensorHandle forgetLayerNormWeightsTensor(tensorInfoNumFp);
1811 armnn::ScopedTensorHandle cellLayerNormWeightsTensor(tensorInfoNumFp);
1812 armnn::ScopedTensorHandle outputLayerNormWeightsTensor(tensorInfoNumFp);
1813
1814 AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, inputToInputWeights.data());
1815 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
1816 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
1817 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
1818 AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, recurrentToInputWeights.data());
1819 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
1820 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
1821 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
1822 AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, cellToInputWeights.data());
1823 AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, inputGateBias.data());
1824 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
1825 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
1826 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
1827 AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
1828 AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
1829 AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, projectionWeights.data());
1830 AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, projectionBiasVector.data());
1831
1832 AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, inputLayerNormWeights.data());
1833 AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, forgetLayerNormWeights.data());
1834 AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, cellLayerNormWeights.data());
1835 AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, outputLayerNormWeights.data());
1836
1837 data.m_InputToInputWeights = &inputToInputWeightsTensor;
1838 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1839 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1840 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1841 data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1842 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1843 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1844 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1845 data.m_CellToInputWeights = &cellToInputWeightsTensor;
1846 data.m_InputGateBias = &inputGateBiasTensor;
1847 data.m_ForgetGateBias = &forgetGateBiasTensor;
1848 data.m_CellBias = &cellBiasTensor;
1849 data.m_OutputGateBias = &outputGateBiasTensor;
1850 data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1851 data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1852 data.m_ProjectionWeights = &projectionWeightsTensor;
1853 data.m_ProjectionBias = &projectionBiasTensor;
1854
1855 data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
1856 data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
1857 data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
1858 data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;
1859
1860 // Flags to set test configuration
1861 data.m_Parameters.m_ActivationFunc = 4;
1862 data.m_Parameters.m_CifgEnabled = false;
1863 data.m_Parameters.m_PeepholeEnabled = true;
1864 data.m_Parameters.m_ProjectionEnabled = true;
1865 data.m_Parameters.m_LayerNormEnabled = true;
1866 data.m_Parameters.m_TimeMajor = false;
1867 data.m_Parameters.m_ClippingThresCell = 10.0f;
1868
1869 std::unique_ptr<armnn::IWorkload> workload
1870 = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
1871 inputHandle->Allocate();
1872 outputStateInHandle->Allocate();
1873 cellStateInHandle->Allocate();
1874 outputHandle->Allocate();
1875
1876 CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
1877 CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
1878 CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
1879
1880 workload->Execute();
1881
1882 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
1883
1884 return LayerTestResult<float, 3>(actualOutput,
1885 expectedOutput,
1886 outputHandle->GetShape(),
1887 outputTensorInfo.GetShape());
1888}
1889
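// QAsymmS8-weight variant of the CIFG + peephole, no-projection test: the input gate
// weights and bias are omitted and m_CifgEnabled is set to true.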
1890LayerTestResult<float, 3> UnidirectionalSequenceLstmInt8WithCifgWithPeepholeNoProjectionTest(
1891 armnn::IWorkloadFactory& workloadFactory,
1892 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1893 const armnn::ITensorHandleFactory& tensorHandleFactory)
1894{
1895 IgnoreUnused(memoryManager);
1896 unsigned int batchSize = 3;
1897 unsigned int timeSize = 2;
1898 unsigned int inputSize = 3;
1899 unsigned int outputSize = 4;
1900 unsigned numUnits = outputSize;
1901
1902 armnn::TensorInfo inputTensorInfo({batchSize, timeSize, inputSize}, armnn::DataType::Float32);
1903 armnn::TensorInfo cellStateInTensorInfo({batchSize, numUnits}, armnn::DataType::Float32);
1904 armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, armnn::DataType::Float32);
1905
1906 armnn::TensorInfo outputTensorInfo({batchSize, timeSize, outputSize}, armnn::DataType::Float32);
1907
1908 const std::vector<float> inputVector = { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.4f,
1909 0.3f, 0.2f, 0.1f, 0.2f, 0.3f, 0.4f,
1910 0.5f, 0.4f, 0.3f, 0.2f, 0.1f, 0.2f };
1911
1912 std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1913 std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1914
1915 std::vector<float> actualOutput(outputTensorInfo.GetNumElements());
1916
1917 const std::vector<float> outputVector = { -0.0072104f, -0.00991171f, -0.00650478f, -0.00713055f,
1918 -0.0191782f, -0.0161269f, -0.0233683f, -0.054299f,
1919 -0.00783725f, 0.00635271f, -0.0126718f, -0.022613f,
1920 -0.0161351f, -0.00775868f, -0.021054f, -0.0339778f,
1921 -0.0146392f, 0.00330261f, -0.0258733f, -0.0407797f,
1922 -0.0174297f, 0.0050105f, -0.0266275f, -0.0362564f };
1923
1924 std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
1925 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1926 tensorHandleFactory.CreateTensorHandle(cellStateInTensorInfo);
1927 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1928 tensorHandleFactory.CreateTensorHandle(outputStateInTensorInfo);
1929
1930 std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
1931
1932 armnn::UnidirectionalSequenceLstmQueueDescriptor data;
1933 armnn::WorkloadInfo info;
1934
1935 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1936 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1937 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1938
1939 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1940
1941 armnn::TensorInfo tensorInfoNumFp({numUnits}, armnn::DataType::Float32);
1942 armnn::TensorInfo tensorInfoNum({numUnits}, armnn::DataType::QAsymmS8, 0.1f, 0);
1943 armnn::TensorInfo tensorInfoNumInput({numUnits, inputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1944 armnn::TensorInfo tensorInfoNumOutput({numUnits, outputSize}, armnn::DataType::QAsymmS8, 0.1f, 0);
1945
1946 std::vector<int8_t> inputToForgetWeights = { 2, 1, 4, -4, 3, -1, -3, -2, -3, 1, -4, -1 };
1947 std::vector<int8_t> inputToCellWeights = { -2, 1, -2, 4, -3, -2, -4, 3, -2, -2, -6, 3 };
1948 std::vector<int8_t> inputToOutputWeights = { 2, 5, -4, 5, 2, -3, 5, 7, 3, -5, 1, -4 };
1949
1950 std::vector<int8_t> recurrentToForgetWeights = { -1, 1, -1, 1, -3, -4, -1, 4, 2, 3, 5, -1, 1, 3, -2, -1 };
1951 std::vector<int8_t> recurrentToCellWeights = { -2, -3, -1, -3, -4, 2, 1, -1, 2, 2, 1, 2, 3, -2, 3, -3 };
1952 std::vector<int8_t> recurrentToOutputWeights = { -3, 3, -1, -2, -2, -2, -1, -5, 1, 3, -4, -1, -1, -1, 2, -1 };
1953
1954 std::vector<int8_t> cellToForgetWeights = { 47, -52, -24, 31 };
1955 std::vector<int8_t> cellToOutputWeights = { -17, 82, 85, -77 };
1956
1957 std::vector<float> forgetGateBias = { 1., 1., 1., 1. };
1958 std::vector<float> cellBias = { 0., 0., 0., 0. };
1959 std::vector<float> outputGateBias = { 0., 0., 0., 0. };
1960
1961 armnn::ScopedTensorHandle inputToForgetWeightsTensor(tensorInfoNumInput);
1962 armnn::ScopedTensorHandle inputToCellWeightsTensor(tensorInfoNumInput);
1963 armnn::ScopedTensorHandle inputToOutputWeightsTensor(tensorInfoNumInput);
1964 armnn::ScopedTensorHandle recurrentToForgetWeightsTensor(tensorInfoNumOutput);
1965 armnn::ScopedTensorHandle recurrentToCellWeightsTensor(tensorInfoNumOutput);
1966 armnn::ScopedTensorHandle recurrentToOutputWeightsTensor(tensorInfoNumOutput);
1967 armnn::ScopedTensorHandle cellToForgetWeightsTensor(tensorInfoNum);
1968 armnn::ScopedTensorHandle cellToOutputWeightsTensor(tensorInfoNum);
1969 armnn::ScopedTensorHandle forgetGateBiasTensor(tensorInfoNumFp);
1970 armnn::ScopedTensorHandle cellBiasTensor(tensorInfoNumFp);
1971 armnn::ScopedTensorHandle outputGateBiasTensor(tensorInfoNumFp);
1972
1973 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, inputToForgetWeights.data());
1974 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, inputToCellWeights.data());
1975 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, inputToOutputWeights.data());
1976 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, recurrentToForgetWeights.data());
1977 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, recurrentToCellWeights.data());
1978 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, recurrentToOutputWeights.data());
1979 AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, cellToForgetWeights.data());
1980 AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, cellToOutputWeights.data());
1981 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, forgetGateBias.data());
1982 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, cellBias.data());
1983 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, outputGateBias.data());
1984
1985 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1986 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1987 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1988 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1989 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1990 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1991 data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1992 data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1993 data.m_ForgetGateBias = &forgetGateBiasTensor;
1994 data.m_CellBias = &cellBiasTensor;
1995 data.m_OutputGateBias = &outputGateBiasTensor;
1996
1997 // Flags to set test configuration
1998 data.m_Parameters.m_ClippingThresCell = 10;
1999 data.m_Parameters.m_ClippingThresProj = 0;
2000 data.m_Parameters.m_ActivationFunc = 4;
2001 data.m_Parameters.m_CifgEnabled = true;
2002 data.m_Parameters.m_PeepholeEnabled = true;
2003 data.m_Parameters.m_ProjectionEnabled = false;
2004 data.m_Parameters.m_TimeMajor = false;
2005
2006 std::unique_ptr<armnn::IWorkload> workload
2007 = workloadFactory.CreateWorkload(armnn::LayerType::UnidirectionalSequenceLstm, data, info);
2008 inputHandle->Allocate();
2009 outputStateInHandle->Allocate();
2010 cellStateInHandle->Allocate();
2011
2012 outputHandle->Allocate();
2013
2014 CopyDataToITensorHandle(inputHandle.get(), inputVector.data());
2015 CopyDataToITensorHandle(outputStateInHandle.get(), outputStateInVector.data());
2016 CopyDataToITensorHandle(cellStateInHandle.get(), cellStateInVector.data());
2017
2018 workload->Execute();
2019
2020 CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
2021
2022 return LayerTestResult<float, 3>(actualOutput,
2023 outputVector,
2024 outputHandle->GetShape(),
2025 outputTensorInfo.GetShape());
2026}