blob: 6cea777a25c11e113db63bac6a5adf22ac906de8 [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
telsoa01c577f2c2018-08-31 09:22:23 +01005
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01006#include "LstmTestImpl.hpp"
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00007
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01008#include <QuantizeHelper.hpp>
9
telsoa01c577f2c2018-08-31 09:22:23 +010010#include <armnn/ArmNN.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010011
12#include <backendsCommon/CpuTensorHandle.hpp>
13
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010014#include <backendsCommon/test/TensorCopyUtils.hpp>
15#include <backendsCommon/test/WorkloadTestUtils.hpp>
16
17#include <reference/workloads/Decoders.hpp>
18#include <reference/workloads/Encoders.hpp>
19#include <reference/workloads/LstmUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010020
David Beckac42efd2018-09-26 17:41:13 +010021#include <test/TensorHelpers.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010022
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010023#include <boost/multi_array.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010024
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010025namespace
26{
Jan Eilers38e05bd2019-06-26 13:10:09 +010027
28template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
29void LstmUtilsVectorBatchVectorAddTestImpl(
30 boost::multi_array<float, 1>& vec,
31 boost::multi_array<float, 2>& batchVec,
32 uint32_t vSize,
33 uint32_t nBatch,
34 boost::multi_array<float, 2>& expectedOutput )
35{
36 float qScale = 0.0f;
37 int32_t qOffset = 0;
38 armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset );
39
40 // Make encoder and decoder
41 std::unique_ptr<armnn::Decoder<float>> vecDecoder = armnn::MakeDecoder<float>(tensorInfo, vec.data());
42 std::unique_ptr<armnn::Decoder<float>> batchVecDecoder = armnn::MakeDecoder<float>(tensorInfo, batchVec.data());
43 std::unique_ptr<armnn::Encoder<float>> batchVecEncoder = armnn::MakeEncoder<float>(tensorInfo, batchVec.data());
44
45 VectorBatchVectorAdd(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
46
47 // check shape and compare values
48 BOOST_TEST(CompareTensors(batchVec, expectedOutput));
49
50 // check if iterator is back at start position
51 batchVecEncoder->Set(1.0f);
52 BOOST_TEST(batchVec[0][0] == 1.0f);
53}
54
55template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
56void LstmUtilsZeroVectorTestImpl(
57 boost::multi_array<float, 1>& input,
58 uint32_t vSize,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010059 boost::multi_array<float, 1>& expectedOutput)
60{
Jan Eilers38e05bd2019-06-26 13:10:09 +010061 float qScale = 0.0f;
62 int32_t qOffset = 0;
63
64 armnn::TensorInfo tensorInfo({vSize}, ArmnnType, qScale, qOffset );
65
66 // Make encoder for input
67 std::unique_ptr<armnn::Encoder<float>> outputEncoder = armnn::MakeEncoder<float>(tensorInfo, input.data());
68
69 // call ZeroVector
70 ZeroVector(*outputEncoder, vSize);
71
72 // check shape and compare values
73 BOOST_TEST(CompareTensors(input, expectedOutput));
74
75 // check if iterator is back at start position
76 outputEncoder->Set(1.0f);
77 BOOST_TEST(input[0] == 1.0f);
78
79}
80
Jan Eilers38e05bd2019-06-26 13:10:09 +010081template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
82void LstmUtilsMeanStddevNormalizationTestImpl(
83 boost::multi_array<float, 2>& input,
84 uint32_t vSize,
85 uint32_t nBatch,
86 boost::multi_array<float, 2>& expectedOutput)
87{
88 float qScale = 0.0f;
89 int32_t qOffset = 0;
90 armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset );
91
92 // Make encoder and decoder for input
93 std::unique_ptr<armnn::Decoder<float>> inputDecoder = armnn::MakeDecoder<float>(tensorInfo, input.data());
94 std::unique_ptr<armnn::Encoder<float>> outputEncoder = armnn::MakeEncoder<float>(tensorInfo, input.data());
95
96 MeanStddevNormalization(*inputDecoder, *outputEncoder, vSize, nBatch, 1e-8f);
97
98 // check shape and compare values
99 BOOST_TEST(CompareTensors(input, expectedOutput));
100
101 // check if iterator is back at start position
102 outputEncoder->Set(1.0f);
103 BOOST_TEST(input[0][0] == 1.0f);
104}
105
106template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
107void LstmUtilsVectorBatchVectorCwiseProductTestImpl(
108 boost::multi_array<float, 1>& vec,
109 boost::multi_array<float, 2>& batchVec,
110 uint32_t vSize,
111 uint32_t nBatch,
112 boost::multi_array<float, 2>& expectedOutput)
113{
114 float qScale = 0.0f;
115 int32_t qOffset = 0;
116 armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset );
117
118 // Make encoder and decoder
119 std::unique_ptr<armnn::Decoder<float>> vecDecoder = armnn::MakeDecoder<float>(tensorInfo, vec.data());
120 std::unique_ptr<armnn::Decoder<float>> batchVecDecoder = armnn::MakeDecoder<float>(tensorInfo, batchVec.data());
121 std::unique_ptr<armnn::Encoder<float>> batchVecEncoder = armnn::MakeEncoder<float>(tensorInfo, batchVec.data());
122
123 VectorBatchVectorCwiseProduct(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
124
125 // check shape and compare values
126 BOOST_TEST(CompareTensors(batchVec, expectedOutput));
127
128 // check if iterator is back at start position
129 batchVecEncoder->Set(1.0f);
130 BOOST_TEST(batchVec[0][0] == 1.0f);
131}
132
133// Lstm Layer tests:
James Conroy9c3cae82019-08-01 16:01:48 +0100134// *********************************** //
// Builds and executes one LSTM workload with CIFG, peephole and projection all
// disabled, using a fixed set of hard-coded weights/biases, and returns the
// produced output next to the caller-supplied expected output.
//
// workloadFactory  - backend factory used to create tensor handles and the workload.
// memoryManager    - NOTE(review): not referenced anywhere in this function body.
// input            - [batchSize, inputSize] network input.
// outputExpected   - [batchSize, outputSize] reference output; also fixes outputSize.
// qScale/qOffset   - quantization parameters applied to the non-constant tensor
//                    infos (placeholders for Float32).
// constantDataType - data type used for the weight/bias tensor infos.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2>
LstmNoCifgNoPeepholeNoProjectionTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const boost::multi_array<T, 2>& input,
    const boost::multi_array<T, 2>& outputExpected,
    float qScale = 0.0f,
    int32_t qOffset = 0,
    armnn::DataType constantDataType = armnn::DataType::Float32)
{
    // Dimensions are derived from the supplied tensors rather than hard-coded.
    unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
    unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
    unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
    // cellSize and outputSize have the same size when there is no projection.
    unsigned numUnits = outputSize;

    // Infos for the three workload inputs: input, output state, cell state.
    armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, ArmnnType, qScale, qOffset );
    armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, ArmnnType, qScale, qOffset);

    // Infos for the four workload outputs. The scratch buffer is
    // [batchSize, numUnits * 4] here (the no-CIFG layout).
    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> ret(outputTensorInfo);

    // NOTE(review): the host-side tensors below are built as float even though
    // the data arrives as T — presumably this impl is only instantiated with
    // float-compatible element types; confirm for quantized instantiations.
    std::vector<float> inputVector;
    inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
    auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);

    // Both state inputs start zeroed.
    std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
    auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);

    std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
    auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);

    std::vector<float> scratchBufferVector(batchSize * numUnits * 4, 0.f);
    auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);

    std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
    auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);

    std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
    auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);

    // Stash the reference output in the result object for the caller to compare.
    std::vector<float> outputVector;
    outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
    ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);

    // Backend tensor handles for every workload input and output.
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
            workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
            workloadFactory.CreateTensorHandle(outputStateInTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
            workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
            workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);


    armnn::LstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Input order expected by the LSTM workload: input, outputStateIn, cellStateIn.
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());

    // Output order: scratchBuffer, outputStateOut, cellStateOut, output.
    AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
    AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
    AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Constant-tensor infos; the names reflect the element count for this
    // fixed test configuration (numUnits = 4, inputSize = 2).
    armnn::TensorInfo tensorInfo4({numUnits}, constantDataType , qScale, qOffset);
    armnn::TensorInfo tensorInfo8({numUnits, 2}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset);

    // Hard-coded weights and biases for the reference LSTM cell.
    auto inputToInputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.45018822f, -0.02338299f, -0.0870589f,
                                                                  -0.34550029f, 0.04266912f, -0.15680569f,
                                                                  -0.34856534f, 0.43890524f});

    auto inputToForgetWeights = MakeTensor<float, 2>(tensorInfo8, {0.09701663f, 0.20334584f, -0.50592935f,
                                                                   -0.31343272f, -0.40032279f, 0.44781327f,
                                                                   0.01387155f, -0.35593212f});

    auto inputToCellWeights = MakeTensor<float, 2>(tensorInfo8, {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f,
                                                                 -0.20583314f, 0.44344562f, 0.22077113f,
                                                                 -0.29909778f});

    auto inputToOutputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.25065863f, -0.28290087f, 0.04613829f,
                                                                   0.40525138f, 0.44272184f, 0.03897077f,
                                                                   -0.1556896f, 0.19487578f});

    auto recurrentToInputWeights = MakeTensor<float, 2>(tensorInfo16, {-0.0063535f, -0.2042388f, 0.31454784f,
                                                                       -0.35746509f, 0.28902304f, 0.08183324f,
                                                                       -0.16555229f, 0.02286911f, -0.13566875f,
                                                                       0.03034258f, 0.48091322f, -0.12528998f,
                                                                       0.24077177f, -0.51332325f, -0.33502164f,
                                                                       0.10629296f});

    auto recurrentToForgetWeights = MakeTensor<float, 2>(tensorInfo16, {-0.48684245f, -0.06655136f, 0.42224967f,
                                                                        0.2112639f, 0.27654213f, 0.20864892f,
                                                                        -0.07646349f, 0.45877004f, 0.00141793f,
                                                                        -0.14609534f, 0.36447752f, 0.09196436f,
                                                                        0.28053468f, 0.01560611f, -0.20127171f,
                                                                        -0.01140004f});

    auto recurrentToCellWeights = MakeTensor<float, 2>(tensorInfo16, {-0.3407414f, 0.24443203f, -0.2078532f,
                                                                      0.26320225f, 0.05695659f, -0.00123841f,
                                                                      -0.4744786f, -0.35869038f, -0.06418842f,
                                                                      -0.13502428f, -0.501764f, 0.22830659f,
                                                                      -0.46367589f, 0.26016325f, -0.03894562f,
                                                                      -0.16368064f});

    auto recurrentToOutputWeights = MakeTensor<float, 2>(tensorInfo16, {0.43385774f, -0.17194885f, 0.2718237f,
                                                                        0.09215671f, 0.24107647f, -0.39835793f,
                                                                        0.18212086f, 0.01301402f, 0.48572797f,
                                                                        -0.50656658f, 0.20047462f, -0.20607421f,
                                                                        -0.51818722f, -0.15390486f, 0.0468148f,
                                                                        0.39922136f});

    auto cellToInputWeights = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});

    auto inputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});

    // Forget gate bias of 1 is the conventional LSTM initialisation.
    auto forgetGateBias = MakeTensor<float, 1>(tensorInfo4, {1., 1., 1., 1.});

    auto cellBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});

    auto outputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});

    armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo8);
    armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo8);
    armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo8);
    armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo8);
    armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
    armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
    armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
    armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
    armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);

    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);

    // Wire the constant tensors into the descriptor.
    // NOTE(review): cellToInputWeightsTensor is filled above but never assigned
    // to data.m_CellToInputWeights — it is a peephole weight and peephole is
    // disabled below, so the workload never reads it.
    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    // Flags to set test configuration
    data.m_Parameters.m_ActivationFunc = 4;
    data.m_Parameters.m_CifgEnabled = false;
    data.m_Parameters.m_PeepholeEnabled = false;
    data.m_Parameters.m_ProjectionEnabled = false;

    // Create the workload, then allocate every handle before copying data in.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    scratchHandle->Allocate();
    outputStateOutHandle->Allocate();
    cellStateOutHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);

    workload->Execute();

    // Only the final output tensor is read back; the caller compares it
    // against ret.outputExpected.
    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    return ret;
}
337
Conor Kennedyb9971c92019-05-07 07:14:23 +0100338template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
339LayerTestResult<T, 2>
Matteo Martincigha65b7ae2018-11-14 12:39:55 +0000340LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory,
341 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Conor Kennedyb9971c92019-05-07 07:14:23 +0100342 const boost::multi_array<T, 2>& input,
343 const boost::multi_array<T, 2>& outputExpected,
344 float qScale = 0.0f,
345 int32_t qOffset = 0,
346 armnn::DataType constantDataType = armnn::DataType::Float32)
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000347{
telsoa01c577f2c2018-08-31 09:22:23 +0100348 unsigned int batchSize = 2;
349 unsigned int outputSize = 16;
350 unsigned int inputSize = 5;
351 unsigned numUnits = 20;
352
Conor Kennedyb9971c92019-05-07 07:14:23 +0100353 armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, ArmnnType, qScale, qOffset);
354 armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, ArmnnType, qScale, qOffset);
355 armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, ArmnnType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +0100356
Matteo Martincigha65b7ae2018-11-14 12:39:55 +0000357 // Scratch buffer size without CIFG [batchSize, numUnits * 4]
Conor Kennedyb9971c92019-05-07 07:14:23 +0100358 armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, ArmnnType, qScale, qOffset);
359 armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
360 armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
361 armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +0100362
Conor Kennedyb9971c92019-05-07 07:14:23 +0100363 LayerTestResult<T, 2> ret(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100364
365 std::vector<float> inputVector;
366 inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
367 auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);
368
369 std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
370 auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);
371
372 std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
373 auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);
374
Matteo Martincigha65b7ae2018-11-14 12:39:55 +0000375 std::vector<float> scratchBufferVector(batchSize * numUnits * 4, 0.f);
telsoa01c577f2c2018-08-31 09:22:23 +0100376 auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
377
378 std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
379 auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
380
381 std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
382 auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
383
384 std::vector<float> outputVector;
385 outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
386 ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);
387
388 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
389 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
390 workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
391 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
392 workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
393
394 std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
395 std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
396 workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
397 std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
398 workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
399 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
400
401 armnn::LstmQueueDescriptor data;
402 armnn::WorkloadInfo info;
403
404 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
405 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
406 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
David Beckac42efd2018-09-26 17:41:13 +0100407
telsoa01c577f2c2018-08-31 09:22:23 +0100408 AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
409 AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
410 AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
411 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
412
Conor Kennedyb9971c92019-05-07 07:14:23 +0100413 armnn::TensorInfo tensorInfo16({outputSize}, constantDataType, qScale, qOffset);
414 armnn::TensorInfo tensorInfo20({numUnits}, constantDataType, qScale, qOffset);
415 armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, constantDataType, qScale, qOffset);
416 armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, constantDataType, qScale, qOffset);
417 armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, constantDataType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +0100418
419 auto inputToInputWeights =
420 MakeTensor<float, 2>(tensorInfo20x5, {0.021393683f,0.06124551f, 0.046905167f,-0.014657677f,-0.03149463f,
421 0.09171803f, 0.14647801f,0.10797193f, -0.0057968358f,0.0019193048f,
422 -0.2726754f, 0.10154029f, -0.018539885f, 0.080349885f, -0.10262385f,
423 -0.022599787f,-0.09121155f, -0.008675967f, -0.045206103f,-0.0821282f,
424 -0.008045952f,0.015478081f, 0.055217247f, 0.038719587f, 0.044153627f,
425 -0.06453243f,0.05031825f, -0.046935108f, -0.008164439f, 0.014574226f,
426 -0.1671009f, -0.15519552f, -0.16819797f,-0.13971269f,-0.11953059f,
427 0.25005487f, -0.22790983f, 0.009855087f, -0.028140958f, -0.11200698f,
428 0.11295408f, -0.0035217577f, 0.054485075f, 0.05184695f, 0.064711206f,
429 0.10989193f, 0.11674786f, 0.03490607f, 0.07727357f, 0.11390585f,
430 -0.1863375f, -0.1034451f, -0.13945189f, -0.049401227f, -0.18767063f,
431 0.042483903f, 0.14233552f, 0.13832581f, 0.18350165f, 0.14545603f,
432 -0.028545704f,0.024939531f,0.050929718f,0.0076203286f,-0.0029723682f,
433 -0.042484224f, -0.11827596f, -0.09171104f, -0.10808628f,-0.16327988f,
434 -0.2273378f, -0.0993647f, -0.017155107f,0.0023917493f,0.049272764f,
435 0.0038534778f, 0.054764505f, 0.089753784f, 0.06947234f, 0.08014476f,
436 -0.04544234f, -0.0497073f,-0.07135631f, -0.048929106f,-0.004042012f,
437 -0.009284026f, 0.018042054f, 0.0036860977f,-0.07427302f, -0.11434604f,
438 -0.018995456f, 0.031487543f, 0.012834908f,0.019977754f,0.044256654f,
439 -0.39292613f, -0.18519334f, -0.11651281f,-0.06809892f, 0.011373677f
440 });
441
442 auto inputToForgetWeights =
443 MakeTensor<float, 2>(tensorInfo20x5, {-0.0018401089f, -0.004852237f,0.03698424f, 0.014181704f,0.028273236f,
444 -0.016726194f, -0.05249759f,-0.10204261f, 0.00861066f,-0.040979505f,
445 -0.009899187f,0.01923892f,-0.028177269f, -0.08535103f,-0.14585495f,
446 0.10662567f,-0.01909731f,-0.017883534f,-0.0047269356f,-0.045103323f,
447 0.0030784295f,0.076784775f,0.07463696f, 0.094531395f,0.0814421f,
448 -0.12257899f, -0.033945758f,-0.031303465f, 0.045630626f,0.06843887f,
449 -0.13492945f, -0.012480007f,-0.0811829f, -0.07224499f,-0.09628791f,
450 0.045100946f,0.0012300825f, 0.013964662f, 0.099372394f,0.02543059f,
451 0.06958324f, 0.034257296f, 0.0482646f, 0.06267997f,0.052625068f,
452 0.12784666f, 0.07077897f, 0.025725935f, 0.04165009f,0.07241905f,
453 0.018668644f, -0.037377294f,-0.06277783f,-0.08833636f,-0.040120605f,
454 -0.011405586f,-0.007808335f,-0.010301386f,-0.005102167f,0.027717464f,
455 0.05483423f, 0.11449111f, 0.11289652f,0.10939839f, 0.13396506f,
456 -0.08402166f,-0.01901462f, -0.044678304f,-0.07720565f,0.014350063f,
457 -0.11757958f, -0.0652038f, -0.08185733f,-0.076754324f,-0.092614375f,
458 0.10405491f, 0.052960336f, 0.035755895f,0.035839386f,-0.012540553f,
459 0.036881298f, 0.02913376f, 0.03420159f,0.05448447f,-0.054523353f,
460 0.02582715f, 0.02327355f, -0.011857179f,-0.0011980024f,-0.034641717f,
461 -0.026125094f,-0.17582615f,-0.15923657f,-0.27486774f,-0.0006143371f,
462 0.0001771948f, -8.470171e-05f, 0.02651807f,0.045790765f,0.06956496f
463 });
464
465 auto inputToCellWeights =
466 MakeTensor<float, 2>(tensorInfo20x5, {-0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
467 -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
468 -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
469 -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
470 -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
471 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f,
472 -0.13002433f, -0.036816437f, -0.02130134f, -0.016518239f,
473 0.0047691227f, -0.0025825808f, 0.066017866f, 0.029991534f,
474 -0.10652836f, -0.1037554f, -0.13056071f, -0.03266643f,
475 -0.033702414f, -0.006473424f, -0.04611692f, 0.014419339f,
476 -0.025174323f, 0.0396852f, 0.081777506f, 0.06157468f,
477 0.10210095f, -0.009658194f, 0.046511717f, 0.03603906f,
478 0.0069369148f, 0.015960095f, -0.06507666f, 0.09551598f,
479 0.053568836f, 0.06408714f, 0.12835667f, -0.008714329f,
480 -0.20211966f, -0.12093674f, 0.029450472f, 0.2849013f,
481 -0.029227901f, 0.1164364f, -0.08560263f, 0.09941786f,
482 -0.036999565f, -0.028842626f, -0.0033637602f, -0.017012902f,
483 -0.09720865f, -0.11193351f, -0.029155117f, -0.017936034f,
484 -0.009768936f, -0.04223324f, -0.036159635f, 0.06505112f,
485 -0.021742892f, -0.023377212f, -0.07221364f, -0.06430552f,
486 0.05453865f, 0.091149814f, 0.06387331f, 0.007518393f,
487 0.055960953f, 0.069779344f, 0.046411168f, 0.10509911f,
488 0.07463894f, 0.0075130584f, 0.012850982f, 0.04555431f,
489 0.056955688f, 0.06555285f, 0.050801456f, -0.009862683f,
490 0.00826772f, -0.026555609f, -0.0073611983f, -0.0014897042f
491 });
492
493 auto inputToOutputWeights =
494 MakeTensor<float, 2>(tensorInfo20x5, {-0.0998932f, -0.07201956f, -0.052803773f,-0.15629593f,-0.15001918f,
495 -0.07650751f,0.02359855f, -0.075155355f, -0.08037709f, -0.15093534f,
496 0.029517552f, -0.04751393f, 0.010350531f,-0.02664851f, -0.016839722f,
497 -0.023121163f, 0.0077019283f, 0.012851257f, -0.05040649f,-0.0129761f,
498 -0.021737747f,-0.038305793f,-0.06870586f, -0.01481247f,-0.001285394f,
499 0.10124236f, 0.083122835f, 0.053313006f,-0.062235646f,-0.075637154f,
500 -0.027833903f, 0.029774971f, 0.1130802f, 0.09218906f, 0.09506135f,
501 -0.086665764f,-0.037162706f,-0.038880914f,-0.035832845f,-0.014481564f,
502 -0.09825003f,-0.12048569f,-0.097665586f,-0.05287633f, -0.0964047f,
503 -0.11366429f, 0.035777505f, 0.13568819f, 0.052451383f,0.050649304f,
504 0.05798951f, -0.021852335f,-0.099848844f,0.014740475f,-0.078897946f,
505 0.04974699f, 0.014160473f, 0.06973932f, 0.04964942f, 0.033364646f,
506 0.08190124f, 0.025535367f, 0.050893165f, 0.048514254f,0.06945813f,
507 -0.078907564f,-0.06707616f, -0.11844508f, -0.09986688f,-0.07509403f,
508 0.06263226f, 0.14925587f, 0.20188436f, 0.12098451f,0.14639415f,
509 0.0015017595f, -0.014267382f, -0.03417257f,0.012711468f,0.0028300495f,
510 -0.024758482f, -0.05098548f,-0.0821182f, 0.014225672f, 0.021544158f,
511 0.08949725f, 0.07505268f, -0.0020780868f, 0.04908258f,0.06476295f,
512 -0.022907063f,0.027562456f,0.040185735f, 0.019567577f,-0.015598739f,
513 -0.049097303f, -0.017121866f, -0.083368234f,-0.02332002f,-0.0840956f
514 });
515
516 auto inputGateBias =
517 MakeTensor<float, 1>(tensorInfo20, {0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
518 -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
519 -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
520 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
521 });
522
523 auto forgetGateBias =
524 MakeTensor<float, 1>(tensorInfo20, {0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f,
525 0.11098921f, 0.15378423f, 0.09263801f, 0.09790885f,
526 0.09508917f, 0.061199076f, 0.07665568f, -0.015443159f,
527 -0.03499149f, 0.046190713f, 0.08895977f, 0.10899629f,
528 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
529 });
530
531 auto cellBias =
532 MakeTensor<float, 1>(tensorInfo20, {-0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f,
533 -0.1483596f, -0.10639995f, -0.091433935f, 0.058573797f,
534 -0.06809782f, -0.07889636f, -0.043246906f, -0.09829136f,
535 -0.4279842f, 0.034901652f, 0.18797937f, 0.0075234566f,
536 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
537 });
538
539 auto outputGateBias =
540 MakeTensor<float, 1>(tensorInfo20, {0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
541 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
542 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
543 -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
544 });
545
546 auto recurrentToInputWeights =
547 MakeTensor<float, 2>(tensorInfo20x16, {-0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
548 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
549 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
550 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
551 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f,
552 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
553 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
554 0.14283475f, -0.07390571f, -0.06402044f, 0.062524505f,
555 -0.093129106f, 0.04860203f, -0.08364217f, -0.08119002f,
556 0.009352075f, 0.22920375f, 0.0016303885f, 0.11583097f,
557 -0.13732095f, 0.012405723f, -0.07551853f, 0.06343048f,
558 0.12162708f, -0.031923793f, -0.014335606f, 0.01790974f,
559 -0.10650317f, -0.0724401f, 0.08554849f, -0.05727212f,
560 0.06556731f, -0.042729504f, -0.043227166f, 0.011683251f,
561 -0.013082158f, -0.029302018f, -0.010899579f, -0.062036745f,
562 -0.022509435f, -0.00964907f, -0.01567329f, 0.04260106f,
563 -0.07787477f, -0.11576462f, 0.017356863f, 0.048673786f,
564 -0.017577527f, -0.05527947f, -0.082487635f, -0.040137455f,
565 -0.10820036f, -0.04666372f, 0.022746278f, -0.07851417f,
566 0.01068115f, 0.032956902f, 0.022433773f, 0.0026891115f,
567 0.08944216f, -0.0685835f, 0.010513544f, 0.07228705f,
568 0.02032331f, -0.059686817f, -0.0005566496f, -0.086984694f,
569 0.040414046f, -0.1380399f, 0.094208956f, -0.05722982f,
570 0.012092817f, -0.04989123f, -0.086576f, -0.003399834f,
571 -0.04696032f, -0.045747425f, 0.10091314f, 0.048676282f,
572 -0.029037097f, 0.031399418f, -0.0040285117f, 0.047237843f,
573 0.09504992f, 0.041799378f, -0.049185462f, -0.031518843f,
574 -0.10516937f, 0.026374253f, 0.10058866f, -0.0033195973f,
575 -0.041975245f, 0.0073591834f, 0.0033782164f, -0.004325073f,
576 -0.10167381f, 0.042500053f, -0.01447153f, 0.06464186f,
577 -0.017142897f, 0.03312627f, 0.009205989f, 0.024138335f,
578 -0.011337001f, 0.035530265f, -0.010912711f, 0.0706555f,
579 -0.005894094f, 0.051841937f, -0.1401738f, -0.02351249f,
580 0.0365468f, 0.07590991f, 0.08838724f, 0.021681072f,
581 -0.10086113f, 0.019608743f, -0.06195883f, 0.077335775f,
582 0.023646897f, -0.095322326f, 0.02233014f, 0.09756986f,
583 -0.048691444f, -0.009579111f, 0.07595467f, 0.11480546f,
584 -0.09801813f, 0.019894179f, 0.08502348f, 0.004032281f,
585 0.037211012f, 0.068537936f, -0.048005626f, -0.091520436f,
586 -0.028379958f, -0.01556313f, 0.06554592f, -0.045599163f,
587 -0.01672207f, -0.020169014f, -0.011877351f, -0.20212261f,
588 0.010889619f, 0.0047078193f, 0.038385306f, 0.08540671f,
589 -0.017140968f, -0.0035865551f, 0.016678626f, 0.005633034f,
590 0.015963363f, 0.00871737f, 0.060130805f, 0.028611384f,
591 0.10109069f, -0.015060172f, -0.07894427f, 0.06401885f,
592 0.011584063f, -0.024466386f, 0.0047652307f, -0.09041358f,
593 0.030737216f, -0.0046374933f, 0.14215417f, -0.11823516f,
594 0.019899689f, 0.006106124f, -0.027092824f, 0.0786356f,
595 0.05052217f, -0.058925f, -0.011402121f, -0.024987547f,
596 -0.0013661642f, -0.06832946f, -0.015667673f, -0.1083353f,
597 -0.00096863037f, -0.06988685f, -0.053350925f, -0.027275559f,
598 -0.033664223f, -0.07978348f, -0.025200296f, -0.017207067f,
599 -0.058403496f, -0.055697463f, 0.005798788f, 0.12965427f,
600 -0.062582195f, 0.0013350133f, -0.10482091f, 0.0379771f,
601 0.072521195f, -0.0029455067f, -0.13797039f, -0.03628521f,
602 0.013806405f, -0.017858358f, -0.01008298f, -0.07700066f,
603 -0.017081132f, 0.019358726f, 0.0027079724f, 0.004635139f,
604 0.062634714f, -0.02338735f, -0.039547626f, -0.02050681f,
605 0.03385117f, -0.083611414f, 0.002862572f, -0.09421313f,
606 0.058618143f, -0.08598433f, 0.00972939f, 0.023867095f,
607 -0.053934585f, -0.023203006f, 0.07452513f, -0.048767887f,
608 -0.07314807f, -0.056307215f, -0.10433547f, -0.06440842f,
609 0.04328182f, 0.04389765f, -0.020006588f, -0.09076438f,
610 -0.11652589f, -0.021705797f, 0.03345259f, -0.010329105f,
611 -0.025767034f, 0.013057034f, -0.07316461f, -0.10145612f,
612 0.06358255f, 0.18531723f, 0.07759293f, 0.12006465f,
613 0.1305557f, 0.058638252f, -0.03393652f, 0.09622831f,
614 -0.16253184f, -2.4580743e-06f, 0.079869635f, -0.070196845f,
615 -0.005644518f, 0.06857898f, -0.12598175f, -0.035084512f,
616 0.03156317f, -0.12794146f, -0.031963028f, 0.04692781f,
617 0.030070418f, 0.0071660685f, -0.095516115f, -0.004643372f,
618 0.040170413f, -0.062104587f, -0.0037324072f, 0.0554317f,
619 0.08184801f, -0.019164372f, 0.06791302f, 0.034257166f,
620 -0.10307039f, 0.021943003f, 0.046745934f, 0.0790918f,
621 -0.0265588f, -0.007824208f, 0.042546265f, -0.00977924f,
622 -0.0002440307f, -0.017384544f, -0.017990116f, 0.12252321f,
623 -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f,
624 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
625 -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
626 -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
627 });
628
629 auto recurrentToForgetWeights =
630 MakeTensor<float, 2>(tensorInfo20x16, {-0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
631 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
632 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
633 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
634 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f,
635 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
636 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
637 0.061878487f, -0.04729229f, 0.034919553f, -0.07585433f,
638 -0.04421272f, -0.044019096f, 0.085488975f, 0.04058006f,
639 -0.06890133f, -0.030951202f, -0.024628663f, -0.07672815f,
640 0.034293607f, 0.08556707f, -0.05293577f, -0.033561368f,
641 -0.04899627f, 0.0241671f, 0.015736353f, -0.095442444f,
642 -0.029564252f, 0.016493602f, -0.035026584f, 0.022337519f,
643 -0.026871363f, 0.004780428f, 0.0077918363f, -0.03601621f,
644 0.016435321f, -0.03263031f, -0.09543275f, -0.047392778f,
645 0.013454138f, 0.028934088f, 0.01685226f, -0.086110644f,
646 -0.046250615f, -0.01847454f, 0.047608484f, 0.07339695f,
647 0.034546845f, -0.04881143f, 0.009128804f, -0.08802852f,
648 0.03761666f, 0.008096139f, -0.014454086f, 0.014361001f,
649 -0.023502491f, -0.0011840804f, -0.07607001f, 0.001856849f,
650 -0.06509276f, -0.006021153f, -0.08570962f, -0.1451793f,
651 0.060212336f, 0.055259194f, 0.06974018f, 0.049454916f,
652 -0.027794661f, -0.08077226f, -0.016179763f, 0.1169753f,
653 0.17213494f, -0.0056326236f, -0.053934924f, -0.0124349f,
654 -0.11520337f, 0.05409887f, 0.088759385f, 0.0019655675f,
655 0.0042065294f, 0.03881498f, 0.019844765f, 0.041858196f,
656 -0.05695512f, 0.047233116f, 0.038937137f, -0.06542224f,
657 0.014429736f, -0.09719407f, 0.13908425f, -0.05379757f,
658 0.012321099f, 0.082840554f, -0.029899208f, 0.044217527f,
659 0.059855383f, 0.07711018f, -0.045319796f, 0.0948846f,
660 -0.011724666f, -0.0033288454f, -0.033542685f, -0.04764985f,
661 -0.13873616f, 0.040668588f, 0.034832682f, -0.015319203f,
662 -0.018715994f, 0.046002675f, 0.0599172f, -0.043107376f,
663 0.0294216f, -0.002314414f, -0.022424703f, 0.0030315618f,
664 0.0014641669f, 0.0029166266f, -0.11878115f, 0.013738511f,
665 0.12375372f, -0.0006038222f, 0.029104086f, 0.087442465f,
666 0.052958444f, 0.07558703f, 0.04817258f, 0.044462286f,
667 -0.015213451f, -0.08783778f, -0.0561384f, -0.003008196f,
668 0.047060397f, -0.002058388f, 0.03429439f, -0.018839769f,
669 0.024734668f, 0.024614193f, -0.042046934f, 0.09597743f,
670 -0.0043254104f, 0.04320769f, 0.0064070094f, -0.0019131786f,
671 -0.02558259f, -0.022822596f, -0.023273505f, -0.02464396f,
672 -0.10991725f, -0.006240552f, 0.0074488563f, 0.024044557f,
673 0.04383914f, -0.046476185f, 0.028658995f, 0.060410924f,
674 0.050786525f, 0.009452605f, -0.0073054377f, -0.024810238f,
675 0.0052906186f, 0.0066939713f, -0.0020913032f, 0.014515517f,
676 0.015898481f, 0.021362653f, -0.030262267f, 0.016587038f,
677 -0.011442813f, 0.041154444f, -0.007631438f, -0.03423484f,
678 -0.010977775f, 0.036152758f, 0.0066366293f, 0.11915515f,
679 0.02318443f, -0.041350313f, 0.021485701f, -0.10906167f,
680 -0.028218046f, -0.00954771f, 0.020531068f, -0.11995105f,
681 -0.03672871f, 0.024019798f, 0.014255957f, -0.05221243f,
682 -0.00661567f, -0.04630967f, 0.033188973f, 0.10107534f,
683 -0.014027541f, 0.030796422f, -0.10270911f, -0.035999842f,
684 0.15443139f, 0.07684145f, 0.036571592f, -0.035900835f,
685 -0.0034699554f, 0.06209149f, 0.015920248f, -0.031122351f,
686 -0.03858649f, 0.01849943f, 0.13872518f, 0.01503974f,
687 0.069941424f, -0.06948533f, -0.0088794185f, 0.061282158f,
688 -0.047401894f, 0.03100163f, -0.041533746f, -0.10430945f,
689 0.044574402f, -0.01425562f, -0.024290353f, 0.034563623f,
690 0.05866852f, 0.023947537f, -0.09445152f, 0.035450947f,
691 0.02247216f, -0.0042998926f, 0.061146557f, -0.10250651f,
692 0.020881841f, -0.06747029f, 0.10062043f, -0.0023941975f,
693 0.03532124f, -0.016341697f, 0.09685456f, -0.016764693f,
694 0.051808182f, 0.05875331f, -0.04536488f, 0.001626336f,
695 -0.028892258f, -0.01048663f, -0.009793449f, -0.017093895f,
696 0.010987891f, 0.02357273f, -0.00010856845f, 0.0099760275f,
697 -0.001845119f, -0.03551521f, 0.0018358806f, 0.05763657f,
698 -0.01769146f, 0.040995963f, 0.02235177f, -0.060430344f,
699 0.11475477f, -0.023854522f, 0.10071741f, 0.0686208f,
700 -0.014250481f, 0.034261297f, 0.047418304f, 0.08562733f,
701 -0.030519066f, 0.0060542435f, 0.014653856f, -0.038836084f,
702 0.04096551f, 0.032249358f, -0.08355519f, -0.026823482f,
703 0.056386515f, -0.010401743f, -0.028396193f, 0.08507674f,
704 0.014410365f, 0.020995233f, 0.17040324f, 0.11511526f,
705 0.02459721f, 0.0066619175f, 0.025853224f, -0.023133837f,
706 -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f,
707 -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
708 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
709 -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
710 });
711
712 auto recurrentToCellWeights =
713 MakeTensor<float, 2>(tensorInfo20x16, {-0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
714 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
715 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
716 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
717 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
718 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
719 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
720 -0.019443132f, -0.030755889f, -0.0040000007f, 0.04465846f,
721 -0.021585021f, 0.0031670958f, 0.0053199246f, -0.056117613f,
722 -0.10893326f, 0.076739706f, -0.08509834f, -0.027997585f,
723 0.037871376f, 0.01449768f, -0.09002357f, -0.06111149f,
724 -0.046195522f, 0.0422062f, -0.005683705f, -0.1253618f,
725 -0.012925729f, -0.04890792f, 0.06985068f, 0.037654128f,
726 0.03398274f, -0.004781977f, 0.007032333f, -0.031787455f,
727 0.010868644f, -0.031489216f, 0.09525667f, 0.013939797f,
728 0.0058680447f, 0.0167067f, 0.02668468f, -0.04797466f,
729 -0.048885044f, -0.12722108f, 0.035304096f, 0.06554885f,
730 0.00972396f, -0.039238118f, -0.05159735f, -0.11329045f,
731 0.1613692f, -0.03750952f, 0.06529313f, -0.071974665f,
732 -0.11769596f, 0.015524369f, -0.0013754242f, -0.12446318f,
733 0.02786344f, -0.014179351f, 0.005264273f, 0.14376344f,
734 0.015983658f, 0.03406988f, -0.06939408f, 0.040699873f,
735 0.02111075f, 0.09669095f, 0.041345075f, -0.08316494f,
736 -0.07684199f, -0.045768797f, 0.032298047f, -0.041805092f,
737 0.0119405f, 0.0061010392f, 0.12652606f, 0.0064572375f,
738 -0.024950314f, 0.11574242f, 0.04508852f, -0.04335324f,
739 0.06760663f, -0.027437469f, 0.07216407f, 0.06977076f,
740 -0.05438599f, 0.034033038f, -0.028602652f, 0.05346137f,
741 0.043184172f, -0.037189785f, 0.10420091f, 0.00882477f,
742 -0.054019816f, -0.074273005f, -0.030617684f, -0.0028467078f,
743 0.024302477f, -0.0038869337f, 0.005332455f, 0.0013399826f,
744 0.04361412f, -0.007001822f, 0.09631092f, -0.06702025f,
745 -0.042049985f, -0.035070654f, -0.04103342f, -0.10273396f,
746 0.0544271f, 0.037184782f, -0.13150354f, -0.0058036847f,
747 -0.008264958f, 0.042035464f, 0.05891794f, 0.029673764f,
748 0.0063542654f, 0.044788733f, 0.054816857f, 0.062257513f,
749 -0.00093483756f, 0.048938446f, -0.004952862f, -0.007730018f,
750 -0.04043371f, -0.017094059f, 0.07229206f, -0.023670016f,
751 -0.052195564f, -0.025616996f, -0.01520939f, 0.045104615f,
752 -0.007376126f, 0.003533447f, 0.006570588f, 0.056037236f,
753 0.12436656f, 0.051817212f, 0.028532185f, -0.08686856f,
754 0.11868599f, 0.07663395f, -0.07323171f, 0.03463402f,
755 -0.050708205f, -0.04458982f, -0.11590894f, 0.021273347f,
756 0.1251325f, -0.15313013f, -0.12224372f, 0.17228661f,
757 0.023029093f, 0.086124025f, 0.006445803f, -0.03496501f,
758 0.028332196f, 0.04449512f, -0.042436164f, -0.026587414f,
759 -0.006041347f, -0.09292539f, -0.05678812f, 0.03897832f,
760 0.09465633f, 0.008115513f, -0.02171956f, 0.08304309f,
761 0.071401566f, 0.019622514f, 0.032163795f, -0.004167056f,
762 0.02295182f, 0.030739572f, 0.056506045f, 0.004612461f,
763 0.06524936f, 0.059999723f, 0.046395954f, -0.0045512207f,
764 -0.1335546f, -0.030136576f, 0.11584653f, -0.014678886f,
765 0.0020118146f, -0.09688814f, -0.0790206f, 0.039770417f,
766 -0.0329582f, 0.07922767f, 0.029322514f, 0.026405897f,
767 0.04207835f, -0.07073373f, 0.063781224f, 0.0859677f,
768 -0.10925287f, -0.07011058f, 0.048005477f, 0.03438226f,
769 -0.09606514f, -0.006669445f, -0.043381985f, 0.04240257f,
770 -0.06955775f, -0.06769346f, 0.043903265f, -0.026784198f,
771 -0.017840602f, 0.024307009f, -0.040079936f, -0.019946516f,
772 0.045318738f, -0.12233574f, 0.026170589f, 0.0074471775f,
773 0.15978073f, 0.10185836f, 0.10298046f, -0.015476589f,
774 -0.039390966f, -0.072174534f, 0.0739445f, -0.1211869f,
775 -0.0347889f, -0.07943156f, 0.014809798f, -0.12412325f,
776 -0.0030663363f, 0.039695457f, 0.0647603f, -0.08291318f,
777 -0.018529687f, -0.004423833f, 0.0037507233f, 0.084633216f,
778 -0.01514876f, -0.056505352f, -0.012800942f, -0.06994386f,
779 0.012962922f, -0.031234352f, 0.07029052f, 0.016418684f,
780 0.03618972f, 0.055686004f, -0.08663945f, -0.017404709f,
781 -0.054761406f, 0.029065743f, 0.052404847f, 0.020238016f,
782 0.0048197987f, -0.0214882f, 0.07078733f, 0.013016777f,
783 0.06262858f, 0.009184685f, 0.020785125f, -0.043904778f,
784 -0.0270329f, -0.03299152f, -0.060088247f, -0.015162964f,
785 -0.001828936f, 0.12642565f, -0.056757294f, 0.013586685f,
786 0.09232601f, -0.035886683f, 0.06000002f, 0.05229691f,
787 -0.052580316f, -0.082029596f, -0.010794592f, 0.012947712f,
788 -0.036429964f, -0.085508935f, -0.13127148f, -0.017744139f,
789 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
790 -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
791 -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
792 -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
793 });
794
795 auto recurrentToOutputWeights =
796 MakeTensor<float, 2>(tensorInfo20x16, {0.025825322f, -0.05813119f, 0.09495884f,-0.045984812f, -0.01255415f,
797 -0.0026479573f,-0.08196161f,-0.054914974f,-0.0046604523f,
798 -0.029587349f, -0.044576716f, -0.07480124f, -0.082868785f,
799 0.023254942f, 0.027502948f, -0.0039728214f, -0.08683098f,
800 -0.08116779f, -0.014675607f, -0.037924774f, -0.023314456f,
801 -0.007401714f, -0.09255757f, 0.029460307f, -0.08829125f,
802 -0.005139627f, -0.08989442f, -0.0555066f, 0.13596267f,
803 -0.025062224f, -0.048351806f, -0.03850004f, 0.07266485f,
804 -0.022414139f, 0.05940088f, 0.075114764f, 0.09597592f,
805 -0.010211725f, -0.0049794707f, -0.011523867f, -0.025980417f,
806 0.072999895f, 0.11091378f, -0.081685916f, 0.014416728f,
807 0.043229222f, 0.034178585f, -0.07530371f, 0.035837382f,
808 -0.085607f, -0.007721233f, -0.03287832f, -0.043848954f,
809 -0.06404588f, -0.06632928f, -0.073643476f, 0.008214239f,
810 -0.045984086f, 0.039764922f, 0.03474462f, 0.060612556f,
811 -0.080590084f, 0.049127717f, 0.04151091f, -0.030063879f,
812 0.008801774f, -0.023021035f, -0.019558564f, 0.05158114f,
813 -0.010947698f, -0.011825728f, 0.0075720972f, 0.0699727f,
814 -0.0039981045f, 0.069350146f, 0.08799282f, 0.016156472f,
815 0.035502106f, 0.11695009f, 0.006217345f, 0.13392477f,
816 -0.037875112f, 0.025745004f, 0.08940699f, -0.00924166f,
817 0.0046702605f, -0.036598757f, -0.08811812f, 0.10522024f,
818 -0.032441203f, 0.008176899f, -0.04454919f, 0.07058152f,
819 0.0067963637f, 0.039206743f, 0.03259838f, 0.03725492f,
820 -0.09515802f, 0.013326398f, -0.052055415f, -0.025676316f,
821 0.03198509f, -0.015951829f, -0.058556724f, 0.036879618f,
822 0.043357447f, 0.028362012f, -0.05908629f, 0.0059240665f,
823 -0.04995891f, -0.019187413f,0.0276265f, -0.01628143f, 0.0025863599f,
824 0.08800015f, 0.035250366f, -0.022165963f, -0.07328642f,
825 -0.009415526f, -0.07455109f, 0.11690406f, 0.0363299f,
826 0.07411125f, 0.042103454f, -0.009660886f, 0.019076364f,
827 0.018299393f, -0.046004917f, 0.08891175f,0.0431396f, -0.026327137f,
828 -0.051502608f, 0.08979574f, -0.051670972f, 0.04940282f,
829 -0.07491107f, -0.021240504f, 0.022596184f, -0.034280192f,
830 0.060163025f, -0.058211457f, -0.051837247f, -0.01349775f,
831 -0.04639988f, -0.035936575f, -0.011681591f, 0.064818054f,
832 0.0073146066f, -0.021745546f, -0.043124277f, -0.06471268f,
833 -0.07053354f, -0.029321948f, -0.05330136f, 0.016933719f,
834 -0.053782392f, 0.13747959f, -0.1361751f, -0.11569455f,
835 0.0033329215f, 0.05693899f, -0.053219706f, 0.063698f,
836 0.07977434f, -0.07924483f, 0.06936997f, 0.0034815092f,
837 -0.007305279f, -0.037325785f, -0.07251102f, -0.033633437f,
838 -0.08677009f, 0.091591336f, -0.14165086f, 0.021752775f,
839 0.019683983f, 0.0011612234f, -0.058154266f, 0.049996935f,
840 0.0288841f, -0.0024567875f, -0.14345716f, 0.010955264f,-0.10234828f,
841 0.1183656f, -0.0010731248f, -0.023590032f,-0.072285876f,-0.0724771f,
842 -0.026382286f, -0.0014920527f, 0.042667855f, 0.0018776858f,
843 0.02986552f, 0.009814309f, 0.0733756f, 0.12289186f,
844 0.018043943f, -0.0458958f, 0.049412545f, 0.033632483f,
845 0.05495232f, 0.036686596f, -0.013781798f, -0.010036754f,
846 0.02576849f, -0.08307328f, 0.010112348f, 0.042521734f,
847 -0.05869831f, -0.071689695f, 0.03876447f, -0.13275425f, -0.0352966f,
848 -0.023077697f, 0.10285965f, 0.084736146f, 0.15568255f,
849 -0.00040734606f, 0.027835453f, -0.10292561f, -0.032401145f,
850 0.10053256f, -0.026142767f, -0.08271222f, -0.0030240538f,
851 -0.016368777f, 0.1070414f, 0.042672627f, 0.013456989f,
852 -0.0437609f, -0.022309763f, 0.11576483f, 0.04108048f,
853 0.061026827f, -0.0190714f, -0.0869359f, 0.037901703f, 0.0610107f,
854 0.07202949f, 0.01675338f, 0.086139716f, -0.08795751f,
855 -0.014898893f, -0.023771819f, -0.01965048f, 0.007955471f,
856 -0.043740474f, 0.03346837f, -0.10549954f, 0.090567775f,
857 0.042013682f, -0.03176985f, 0.12569028f, -0.02421228f,
858 -0.029526481f, 0.023851605f, 0.031539805f, 0.05292009f,
859 -0.02344001f, -0.07811758f, -0.08834428f, 0.10094801f,
860 0.16594367f, -0.06861939f, -0.021256343f, -0.041093912f,
861 -0.06669611f, 0.035498552f, 0.021757556f, -0.09302526f,
862 -0.015403468f, -0.06614931f, -0.051798206f, -0.013874718f,
863 0.03630673f, 0.010412845f, -0.08077351f, 0.046185967f,
864 0.0035662893f, 0.03541868f, -0.094149634f, -0.034814864f,
865 0.003128424f, -0.020674974f, -0.03944324f, -0.008110165f,
866 -0.11113267f, 0.08484226f, 0.043586485f, 0.040582247f,
867 0.0968012f, -0.065249965f, -0.028036479f, 0.0050708856f,
868 0.0017462453f, 0.0326779f, 0.041296225f, 0.09164146f,
869 -0.047743853f, -0.015952192f, -0.034451712f, 0.084197424f,
870 -0.05347844f, -0.11768019f, 0.085926116f, -0.08251791f,
871 -0.045081906f, 0.0948852f, 0.068401024f, 0.024856757f,
872 0.06978981f, -0.057309967f, -0.012775832f, -0.0032452994f,
873 0.01977615f, -0.041040014f, -0.024264973f,0.063464895f, 0.05431621f
874 });
875
876 auto cellToInputWeights =
877 MakeTensor<float, 1>(tensorInfo20, {0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
878 -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
879 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f,-0.052169047f,
880 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
881 });
882
883
884 auto cellToForgetWeights =
885 MakeTensor<float, 1>(tensorInfo20, {-0.01998659f,-0.15568835f,-0.24248174f, -0.012770197f, 0.041331276f,
886 -0.072311886f, -0.052123554f,-0.0066330447f,-0.043891653f,0.036225766f,
887 -0.047248036f, 0.021479502f,0.033189066f, 0.11952997f, -0.020432774f,
888 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
889 });
890
891 auto cellToOutputWeights =
892 MakeTensor<float, 1>(tensorInfo20, {0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
893 -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
894 -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
895 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
896 });
897
898 auto projectionWeights =
899 MakeTensor<float, 2>(tensorInfo16x20,
900 {-0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
901 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
902 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
903 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
904 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
905 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
906 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
907 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
908 -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
909 -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
910 -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
911 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
912 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
913 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
914 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
915 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
916 -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
917 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
918 -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
919 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
920 -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
921 -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
922 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
923 -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
924 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
925 -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
926 -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
927 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
928 -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
929 -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
930 -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
931 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
932 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
933 -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
934 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
935 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
936 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
937 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
938 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
939 -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
940 -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
941 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
942 -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
943 -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
944 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
945 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
946 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
947 -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
948 -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
949 -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
950 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
951 -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
952 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
953 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
954 -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
955 -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
956 -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
957 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
958 -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
959 -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
960 -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
961 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
962 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
963 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
964 });
965
966 std::vector<float> projectionBiasVector(outputSize, 0.f);
967 auto projectionBias = MakeTensor<float,1>(tensorInfo16, projectionBiasVector);
968
969 armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo20x5);
970 armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo20x5);
971 armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo20x5);
972 armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo20x5);
973 armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo20x16);
974 armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo20x16);
975 armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo20x16);
976 armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo20x16);
977 armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo20);
978 armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo20);
979 armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo20);
980 armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo20);
981 armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo20);
982 armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo20);
983 armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo20);
984 armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo16x20);
985 armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo16);
986
987 AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
988 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
989 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
990 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
991 AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
992 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
993 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
994 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
995 AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
996 AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
997 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
998 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
999 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
1000 AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
1001 AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
1002 AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
1003 AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);
1004
1005 data.m_InputToInputWeights = &inputToInputWeightsTensor;
1006 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1007 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1008 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1009 data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1010 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1011 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1012 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1013 data.m_CellToInputWeights = &cellToInputWeightsTensor;
1014 data.m_InputGateBias = &inputGateBiasTensor;
1015 data.m_ForgetGateBias = &forgetGateBiasTensor;
1016 data.m_CellBias = &cellBiasTensor;
1017 data.m_OutputGateBias = &outputGateBiasTensor;
1018 data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1019 data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1020 data.m_ProjectionWeights = &projectionWeightsTensor;
1021 data.m_ProjectionBias = &projectionBiasTensor;
1022
1023 // Flags to set test configuration
1024 data.m_Parameters.m_ActivationFunc = 4;
1025 data.m_Parameters.m_CifgEnabled = false;
1026 data.m_Parameters.m_PeepholeEnabled = true;
1027 data.m_Parameters.m_ProjectionEnabled = true;
1028
1029
1030 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
1031 inputHandle->Allocate();
1032 outputStateInHandle->Allocate();
1033 cellStateInHandle->Allocate();
1034
1035 scratchHandle->Allocate();
1036 outputStateOutHandle->Allocate();
1037 cellStateOutHandle->Allocate();
1038 outputHandle->Allocate();
1039
1040 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
1041 CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
1042 CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
1043
telsoa01c577f2c2018-08-31 09:22:23 +01001044 workload->Execute();
1045
1046 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
1047
1048 return ret;
1049
1050}
1051
Conor Kennedyb9971c92019-05-07 07:14:23 +01001052template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1053LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001054 armnn::IWorkloadFactory& workloadFactory,
1055 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Conor Kennedyb9971c92019-05-07 07:14:23 +01001056 const boost::multi_array<T, 2>& input,
1057 const boost::multi_array<T, 2>& outputExpected,
1058 float qScale = 0.0f,
1059 int32_t qOffset = 0,
1060 armnn::DataType constantDataType = armnn::DataType::Float32)
telsoa01c577f2c2018-08-31 09:22:23 +01001061{
1062 bool cifgEnabled = true;
1063 bool peepholeEnabled = true;
1064 bool projectionEnabled = false;
1065 // These are not the input and the output of Lstm yet
1066 unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
1067 unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
1068
1069 unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
1070
1071 const unsigned int cellSize = outputSize;
1072
1073 // Decide the shape of all input tensors
Conor Kennedyb9971c92019-05-07 07:14:23 +01001074 armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, ArmnnType, qScale, qOffset); // change to ArmnnType
1075 armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
1076 armnn::TensorInfo cellStateInTensorInfo({batchSize, cellSize}, ArmnnType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +01001077
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00001078 unsigned int scratchBufferSize = cifgEnabled ? cellSize * 3 : cellSize * 4;
Conor Kennedyb9971c92019-05-07 07:14:23 +01001079 armnn::TensorInfo scratchBufferTensorInfo({batchSize, scratchBufferSize}, ArmnnType, qScale, qOffset);
1080 armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
1081 armnn::TensorInfo cellStateOutTensorInfo({batchSize, cellSize}, ArmnnType, qScale, qOffset);
1082 armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +01001083
1084 // List of inputs
1085 std::vector<float> inputData;
1086 inputData.assign(input.data(), input.data() + batchSize*inputSize);
1087 auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputData);
1088
1089 std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1090 auto outputStateInTensor = MakeTensor<float, 2>(outputStateInTensorInfo, outputStateInVector);
1091
1092 std::vector<float> cellStateInVector(batchSize * cellSize, 0.f);
1093 auto cellStateInTensor = MakeTensor<float, 2>(cellStateInTensorInfo, cellStateInVector);
1094
1095
1096 // Prepare all the weights in the descriptor for LSTM
1097 armnn::LstmQueueDescriptor data;
Conor Kennedyb9971c92019-05-07 07:14:23 +01001098 armnn::TensorInfo tensorInfoInput({cellSize, inputSize}, constantDataType, qScale, qOffset);
1099 armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, constantDataType, qScale, qOffset);
1100 armnn::TensorInfo tensorInfoNumUnits({cellSize}, constantDataType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +01001101
1102 auto inputToCellWeights = MakeTensor<float, 2>(tensorInfoInput,
1103 {-0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f,
1104 0.04717243f, 0.48944736f, -0.38535351f,
1105 -0.17212132f});
1106 auto inputToForgetWeights = MakeTensor<float, 2>(tensorInfoInput,
1107 {-0.55291498f, -0.42866567f, 0.13056988f,
1108 -0.3633365f, -0.22755712f, 0.28253698f, 0.24407166f,
1109 0.33826375f});
1110 auto inputToOutputWeights = MakeTensor<float, 2>(tensorInfoInput,
1111 {0.10725588f, -0.02335852f, -0.55932593f,
1112 -0.09426838f, -0.44257352f, 0.54939759f,
1113 0.01533556f, 0.42751634f});
1114 auto cellBias = MakeTensor<float, 1>(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f});
1115 auto forgetGateBias = MakeTensor<float, 1>(tensorInfoNumUnits, {1.f, 1.f, 1.f, 1.f});
1116 auto outputGateBias = MakeTensor<float, 1>(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f});
1117
1118 auto recurrentToCellWeights = MakeTensor<float, 2>(tensorInfoOutput,
1119 {0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f, 0.42957711f,
1120 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f, 0.20675004f,
1121 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f, 0.44901288f,
1122 0.21193194f});
1123 auto recurrentToForgetWeights = MakeTensor<float, 2>(tensorInfoOutput,
1124 {-0.13832897f, -0.0515101f, -0.2359007f, -0.16661474f, -0.14340827f,
1125 0.36986142f, 0.23414481f, 0.55899f, 0.10798943f, -0.41174671f, 0.17751795f,
1126 -0.34484994f, -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f});
1127
1128 auto recurrentToOutputWeights = MakeTensor<float, 2>(tensorInfoOutput,
1129 {0.41613156f, 0.42610586f, -0.16495961f, -0.5663873f, 0.30579174f, -0.05115908f,
1130 -0.33941799f, 0.23364776f, 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
1131 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f});
1132
1133 auto cellToForgetWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
1134 {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f});
1135 auto cellToOutputWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
1136 {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f});
1137
1138 armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfoInput);
1139 armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfoInput);
1140 armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfoInput);
1141
1142 armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfoNumUnits);
1143 armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfoNumUnits);
1144 armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfoNumUnits);
1145
1146 armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfoOutput);
1147 armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput);
1148 armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput);
1149
1150
1151 armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits);
1152 armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits);
1153
1154 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
1155 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
1156 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
1157
1158 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
1159 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
1160 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
1161
1162 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
1163 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
1164 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
1165
1166 AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
1167 AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
1168
1169
1170 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1171 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1172 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1173
1174 data.m_CellBias = &cellBiasTensor;
1175 data.m_ForgetGateBias = &forgetGateBiasTensor;
1176 data.m_OutputGateBias = &outputGateBiasTensor;
1177
1178 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1179 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1180 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1181
1182 data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1183 data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1184
1185 // other parameters for the descriptor
1186 data.m_Parameters.m_CifgEnabled = cifgEnabled;
1187 data.m_Parameters.m_ProjectionEnabled = projectionEnabled;
1188 data.m_Parameters.m_PeepholeEnabled = peepholeEnabled;
1189
1190 data.m_Parameters.m_ActivationFunc = 4;
1191 data.m_Parameters.m_ClippingThresProj = 0.0;
1192 data.m_Parameters.m_ClippingThresCell = 0.0;
1193
1194
1195 // List of outputs
1196 std::vector<float> scratchBufferVector(batchSize * scratchBufferSize, 0.f);
1197 auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
Conor Kennedyb9971c92019-05-07 07:14:23 +01001198 LayerTestResult<T, 2> ret0(scratchBufferTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001199
1200 // Output state for a certain time step
1201 std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
1202 auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
Conor Kennedyb9971c92019-05-07 07:14:23 +01001203 LayerTestResult<T, 2> ret1(outputStateOutTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001204
1205 // Cell state for a certain time step
1206 std::vector<float> cellStateOutVector(batchSize * cellSize, 0.f);
1207 auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
Conor Kennedyb9971c92019-05-07 07:14:23 +01001208 LayerTestResult<T, 2> ret2(cellStateOutTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001209
1210 // Output for a certain time step
1211 std::vector<float> outputVector(batchSize * outputSize, 0.f);
1212 auto outputTensor = MakeTensor<float, 2>(outputTensorInfo, outputVector);
1213 std::vector<float> outputData;
1214 outputData.assign(outputExpected.data(), outputExpected.data() + batchSize*outputSize);
Conor Kennedyb9971c92019-05-07 07:14:23 +01001215 LayerTestResult<T, 2> ret3(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001216 ret3.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputData);
1217
1218 // Prepare the inputs and outputs for the workload
1219 std::unique_ptr<armnn::ITensorHandle> inputHandle =
1220 workloadFactory.CreateTensorHandle(inputTensorInfo);
1221 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1222 workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
1223 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1224 workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
1225
1226 std::unique_ptr<armnn::ITensorHandle> scratchBufferHandle =
1227 workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
1228 std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
1229 workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
1230 std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
1231 workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
1232 std::unique_ptr<armnn::ITensorHandle> outputHandle =
1233 workloadFactory.CreateTensorHandle(outputTensorInfo);
1234
1235 armnn::WorkloadInfo info;
1236 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1237 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1238 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1239
1240 AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchBufferHandle.get());
1241 AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
1242 AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
1243 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1244
1245 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
1246
1247
1248 inputHandle->Allocate();
1249 outputStateInHandle->Allocate();
1250 cellStateInHandle->Allocate();
1251
1252 scratchBufferHandle->Allocate();
1253 outputStateOutHandle->Allocate();
1254 cellStateOutHandle->Allocate();
1255 outputHandle->Allocate();
1256
1257
1258 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
1259 CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
1260 CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
1261
1262 CopyDataToITensorHandle(scratchBufferHandle.get(), &scratchBufferTensor[0][0]);
1263 CopyDataToITensorHandle(outputStateOutHandle.get(), &outputStateOutTensor[0][0]);
1264 CopyDataToITensorHandle(cellStateOutHandle.get(), &cellStateOutTensor[0][0]);
1265
telsoa01c577f2c2018-08-31 09:22:23 +01001266 workload->Execute();
1267
1268 CopyDataFromITensorHandle(&ret0.output[0][0], scratchBufferHandle.get());
1269 CopyDataFromITensorHandle(&ret1.output[0][0], outputStateOutHandle.get());
1270 CopyDataFromITensorHandle(&ret2.output[0][0], cellStateOutHandle.get());
1271 CopyDataFromITensorHandle(&ret3.output[0][0], outputHandle.get());
1272
1273 return ret3;
1274}
Jan Eilers38e05bd2019-06-26 13:10:09 +01001275
Jan Eilers38e05bd2019-06-26 13:10:09 +01001276template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1277LayerTestResult<T, 2>
1278LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
1279 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1280 const boost::multi_array<T, 2>& input,
1281 const boost::multi_array<T, 2>& outputExpected,
1282 float qScale = 0.0f,
1283 int32_t qOffset = 0,
1284 armnn::DataType constantDataType = armnn::DataType::Float32)
1285{
1286 unsigned int batchSize = 2;
1287 unsigned int outputSize = 3;
1288 unsigned int inputSize = 5;
1289 unsigned numUnits = 4;
1290
1291 armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, ArmnnType, qScale, qOffset);
1292 armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, ArmnnType, qScale, qOffset);
1293 armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, ArmnnType, qScale, qOffset);
1294
1295 // Scratch buffer size without CIFG [batchSize, numUnits * 4]
1296 armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, ArmnnType, qScale, qOffset);
1297 armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
1298 armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
1299 armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
1300
1301 LayerTestResult<T, 2> ret(outputTensorInfo);
1302
1303 std::vector<float> inputVector;
1304 inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
1305 auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);
1306
1307 std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
1308 auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);
1309
1310 std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1311 auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);
1312
1313 std::vector<float> scratchBufferVector(batchSize * numUnits * 4, 0.f);
1314 auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
1315
1316 std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
1317 auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
1318
1319 std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
1320 auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
1321
1322 std::vector<float> outputVector;
1323 outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
1324 ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);
1325
1326 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
1327 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1328 workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
1329 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1330 workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
1331
1332 std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
1333 std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
1334 workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
1335 std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
1336 workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
1337 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
1338
1339 armnn::LstmQueueDescriptor data;
1340 armnn::WorkloadInfo info;
1341
1342 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1343 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1344 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1345
1346 AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
1347 AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
1348 AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
1349 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1350
1351 armnn::TensorInfo tensorInfo3({outputSize}, constantDataType, qScale, qOffset);
1352 armnn::TensorInfo tensorInfo4({numUnits}, constantDataType, qScale, qOffset);
1353 armnn::TensorInfo tensorInfo4x5({numUnits, inputSize}, constantDataType, qScale, qOffset);
1354 armnn::TensorInfo tensorInfo4x3({numUnits, outputSize}, constantDataType, qScale, qOffset);
1355 armnn::TensorInfo tensorInfo3x4({outputSize, numUnits}, constantDataType, qScale, qOffset);
1356
1357 auto inputToInputWeights =
1358 MakeTensor<float, 2>(tensorInfo4x5, { 0.5f, 0.6f, 0.7f, -0.8f, -0.9f,
1359 0.1f, 0.2f, 0.3f, -0.4f, 0.5f,
1360 -0.8f, 0.7f, -0.6f, 0.5f, -0.4f,
1361 -0.5f, -0.4f, -0.3f, -0.2f, -0.1f}); //{numUnits, inputSize}
1362
1363 auto inputToForgetWeights =
1364 MakeTensor<float, 2>(tensorInfo4x5, {-0.6f, -0.1f, 0.3f, 0.2f, 0.9f,
1365 -0.5f, -0.2f, -0.4f, 0.3f, -0.8f,
1366 -0.4f, 0.3f, -0.5f, -0.4f, -0.6f,
1367 0.3f, -0.4f, -0.6f, -0.5f, -0.5f}); //{numUnits, inputSize}
1368
1369 auto inputToCellWeights =
1370 MakeTensor<float, 2>(tensorInfo4x5, {-0.4f, -0.3f, -0.2f, -0.1f, -0.5f,
1371 0.5f, -0.2f, -0.3f, -0.2f, -0.6f,
1372 0.6f, -0.1f, -0.4f, -0.3f, -0.7f,
1373 0.7f, -0.9f, -0.5f, 0.8f, 0.6f}); //{numUnits, inputSize}
1374
1375 auto inputToOutputWeights =
1376 MakeTensor<float, 2>(tensorInfo4x5, {-0.8f, -0.4f, -0.2f, -0.9f, -0.1f,
1377 -0.7f, 0.3f, -0.3f, -0.8f, -0.2f,
1378 0.6f, -0.2f, 0.4f, -0.7f, -0.3f,
1379 -0.5f, 0.1f, 0.5f, -0.6f, -0.4f}); //{numUnits, inputSize}
1380
1381 auto inputGateBias =
1382 MakeTensor<float, 1>(tensorInfo4, {0.03f, 0.15f, 0.22f, 0.38f}); //{numUnits}
1383
1384 auto forgetGateBias =
1385 MakeTensor<float, 1>(tensorInfo4, {0.1f, -0.3f, -0.2f, 0.1f}); //{numUnits}
1386
1387 auto cellBias =
1388 MakeTensor<float, 1>(tensorInfo4, {-0.05f, 0.72f, 0.25f, 0.08f}); //{numUnits}
1389
1390 auto outputGateBias =
1391 MakeTensor<float, 1>(tensorInfo4, {0.05f, -0.01f, 0.2f, 0.1f}); //{numUnits}
1392
1393 auto recurrentToInputWeights =
1394 MakeTensor<float, 2>(tensorInfo4x3, {-0.2f, -0.3f, 0.4f,
1395 0.1f, -0.5f, 0.9f,
1396 -0.2f, -0.3f, -0.7f,
1397 0.05f, -0.2f, -0.6f}); //{numUnits, outputSize}
1398
1399 auto recurrentToCellWeights =
1400 MakeTensor<float, 2>(tensorInfo4x3, {-0.3f, 0.2f, 0.1f,
1401 -0.3f, 0.8f, -0.08f,
1402 -0.2f, 0.3f, 0.8f,
1403 -0.6f, -0.1f, 0.2f}); //{numUnits, outputSize}
1404
1405 auto recurrentToForgetWeights =
1406 MakeTensor<float, 2>(tensorInfo4x3, {-0.5f, -0.3f, -0.5f,
1407 -0.2f, 0.6f, 0.4f,
1408 0.9f, 0.3f, -0.1f,
1409 0.2f, 0.5f, 0.2f}); //{numUnits, outputSize}
1410
1411 auto recurrentToOutputWeights =
1412 MakeTensor<float, 2>(tensorInfo4x3, { 0.3f, -0.1f, 0.1f,
1413 -0.2f, -0.5f, -0.7f,
1414 -0.2f, -0.6f, -0.1f,
1415 -0.4f, -0.7f, -0.2f}); //{numUnits, outputSize}
1416
1417 auto cellToInputWeights =
1418 MakeTensor<float, 1>(tensorInfo4, {0.05f, 0.1f, 0.25f, 0.15f}); //{numUnits}
1419
1420 auto cellToForgetWeights =
1421 MakeTensor<float, 1>(tensorInfo4, {-0.02f, -0.15f, -0.25f, -0.03f}); //{numUnits}
1422
1423 auto cellToOutputWeights =
1424 MakeTensor<float, 1>(tensorInfo4, {0.1f, -0.1f, -0.5f, 0.05f}); //{numUnits}
1425
1426 auto projectionWeights =
1427 MakeTensor<float, 2>(tensorInfo3x4,
1428 {-0.1f, 0.2f, 0.01f, -0.2f,
1429 0.1f, 0.5f, 0.3f, 0.08f,
1430 0.07f, 0.2f, -0.4f, 0.2f}); //{outputSize, numUnits}
1431
1432 std::vector<float> projectionBiasVector(outputSize, 0.f);
1433 auto projectionBias = MakeTensor<float,1>(tensorInfo3, projectionBiasVector); //{outputSize}
1434
1435 auto inputLayerNormWeights =
1436 MakeTensor<float, 1>(tensorInfo4, {0.1f, 0.2f, 0.3f, 0.5f}); //{numUnits}
1437
1438 auto forgetLayerNormWeights =
1439 MakeTensor<float, 1>(tensorInfo4, {0.2f, 0.2f, 0.4f, 0.3f}); //{numUnits}
1440
1441 auto cellLayerNormWeights =
1442 MakeTensor<float, 1>(tensorInfo4, {0.7f, 0.2f, 0.3f, 0.8f}); //{numUnits}
1443
1444 auto outputLayerNormWeights =
1445 MakeTensor<float, 1>(tensorInfo4, {0.6f, 0.2f, 0.2f, 0.5f}); //{numUnits}
1446
1447
1448 armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
1449 armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
1450 armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
1451 armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
1452 armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
1453 armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
1454 armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
1455 armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
1456 armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
1457 armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
1458 armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
1459 armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
1460 armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
1461 armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo4);
1462 armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo4);
1463 armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo3x4);
1464 armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo3);
1465
1466 armnn::ScopedCpuTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
1467 armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
1468 armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
1469 armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(tensorInfo4);
1470
1471 AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
1472 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
1473 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
1474 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
1475 AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
1476 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
1477 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
1478 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
1479 AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
1480 AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
1481 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
1482 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
1483 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
1484 AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
1485 AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
1486 AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
1487 AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);
1488
1489 AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, &inputLayerNormWeights[0]);
1490 AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
1491 AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
1492 AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);
1493
1494 data.m_InputToInputWeights = &inputToInputWeightsTensor;
1495 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1496 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1497 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1498 data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1499 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1500 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1501 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1502 data.m_CellToInputWeights = &cellToInputWeightsTensor;
1503 data.m_InputGateBias = &inputGateBiasTensor;
1504 data.m_ForgetGateBias = &forgetGateBiasTensor;
1505 data.m_CellBias = &cellBiasTensor;
1506 data.m_OutputGateBias = &outputGateBiasTensor;
1507 data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1508 data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1509 data.m_ProjectionWeights = &projectionWeightsTensor;
1510 data.m_ProjectionBias = &projectionBiasTensor;
1511
1512 data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
1513 data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
1514 data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
1515 data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;
1516
1517 // Flags to set test configuration
1518 data.m_Parameters.m_ActivationFunc = 4;
1519 data.m_Parameters.m_CifgEnabled = false;
1520 data.m_Parameters.m_PeepholeEnabled = true;
1521 data.m_Parameters.m_ProjectionEnabled = true;
1522 data.m_Parameters.m_LayerNormEnabled = true;
1523
1524
1525 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
1526 inputHandle->Allocate();
1527 outputStateInHandle->Allocate();
1528 cellStateInHandle->Allocate();
1529
1530 scratchHandle->Allocate();
1531 outputStateOutHandle->Allocate();
1532 cellStateOutHandle->Allocate();
1533 outputHandle->Allocate();
1534
1535 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
1536 CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
1537 CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
1538
1539 workload->Execute();
1540
1541 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
1542
1543 return ret;
James Conroy9c3cae82019-08-01 16:01:48 +01001544}
1545
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001546LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
1547 armnn::IWorkloadFactory& workloadFactory,
1548 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
1549 const boost::multi_array<uint8_t, 2>& input,
1550 const boost::multi_array<uint8_t, 2>& outputExpected)
James Conroy9c3cae82019-08-01 16:01:48 +01001551{
James Conroy9c3cae82019-08-01 16:01:48 +01001552 auto numBatches = boost::numeric_cast<unsigned int>(input.shape()[0]);
1553 auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
1554 auto outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
1555
1556 // Scale/Offset for input/output, cellState In/Out, weights, bias
1557 float inputOutputScale = 0.0078125f;
1558 int32_t inputOutputOffset = 128;
1559
1560 float cellStateScale = 0.00048828125f;
1561 int32_t cellStateOffset = 0;
1562
1563 float weightsScale = 0.00408021f;
1564 int32_t weightsOffset = 100;
1565
1566 float biasScale = 3.1876640625e-05f;
1567 int32_t biasOffset = 0;
1568
1569 // Input/Output tensor info
1570 armnn::TensorInfo inputInfo({numBatches , inputSize},
1571 armnn::DataType::QuantisedAsymm8,
1572 inputOutputScale,
1573 inputOutputOffset);
1574
1575 armnn::TensorInfo cellStateInfo({numBatches , outputSize},
1576 armnn::DataType::QuantisedSymm16,
1577 cellStateScale,
1578 cellStateOffset);
1579
1580 armnn::TensorInfo outputStateInfo({numBatches , outputSize},
1581 armnn::DataType::QuantisedAsymm8,
1582 inputOutputScale,
1583 inputOutputOffset);
1584
1585 LayerTestResult<uint8_t, 2> ret(outputStateInfo);
1586
1587 // Input0
1588 std::vector<uint8_t> inputVector;
1589 inputVector.assign(input.data(), input.data() + (numBatches * inputSize));
1590 auto inputTensor = MakeTensor<uint8_t, 2>(inputInfo, inputVector);
1591
1592 // Input1
1593 std::vector<int16_t> cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036}; // 13
1594 auto cellStateInTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateInVector);
1595
1596 // Input2
1597 std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112}; // 14
1598 auto outputStateInTensor = MakeTensor<uint8_t, 2>(outputStateInfo, outputStateInVector);
1599
1600 // Output0
1601 std::vector<int16_t> cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235}; // 0
1602 auto cellStateOutTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateOutVector);
1603
1604 // Output1
1605 std::vector<uint8_t> outputVector; // 1
1606 outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
1607 ret.outputExpected = MakeTensor<uint8_t, 2>(outputStateInfo, outputVector);
1608
1609 // Create tensor handles
1610 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
1611 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1612 workloadFactory.CreateTensorHandle(cellStateInfo);
1613 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1614 workloadFactory.CreateTensorHandle(outputStateInfo);
1615
1616 std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
1617 workloadFactory.CreateTensorHandle(cellStateInfo);
1618 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputStateInfo);
1619
1620 armnn::QuantizedLstmQueueDescriptor data;
1621 armnn::WorkloadInfo info;
1622
1623 // Add inputs and outputs to workload
1624 AddInputToWorkload(data, info, inputInfo, inputHandle.get());
1625 AddInputToWorkload(data, info, cellStateInfo, cellStateInHandle.get());
1626 AddInputToWorkload(data, info, outputStateInfo, outputStateInHandle.get());
1627
1628 AddOutputToWorkload(data, info, cellStateInfo, cellStateOutHandle.get());
1629 AddOutputToWorkload(data, info, outputStateInfo, outputHandle.get());
1630
1631 // Weights and bias tensor and quantization info
1632 armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
1633 armnn::DataType::QuantisedAsymm8,
1634 weightsScale,
1635 weightsOffset);
1636
1637 armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
1638 armnn::DataType::QuantisedAsymm8,
1639 weightsScale,
1640 weightsOffset);
1641
1642 armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);
1643
1644 // Weights and bias tensor data
1645 auto inputToInputWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {146, 250, 235, 171, 10, 218, 171, 108});
1646 auto inputToForgetWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {24, 50, 132, 179, 158, 110, 3, 169});
1647 auto inputToCellWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {133, 34, 29, 49, 206, 109, 54, 183});
1648 auto inputToOutputWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {195, 187, 11, 99, 109, 10, 218, 48});
1649
1650 auto recurrentToInputWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
1651 {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26});
1652 auto recurrentToForgetWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
1653 {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253});
1654 auto recurrentToCellWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
1655 {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216});
1656 auto recurrentToOutputWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
1657 {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98});
1658
1659 auto inputGateBias = MakeTensor<int32_t, 1>(biasInfo, {-7876, 13488, -726, 32839});
1660 auto forgetGateBias = MakeTensor<int32_t, 1>(biasInfo, {9206, -46884, -11693, -38724});
1661 auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {39481, 48624, 48976, -21419});
1662 auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {-58999, -17050, -41852, -40538});
1663
1664 // ScopedCpuTensorHandles
1665 armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
1666 armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
1667 armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
1668 armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);
1669
1670 armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
1671 armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
1672 armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
1673 armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);
1674
1675 armnn::ScopedCpuTensorHandle inputGateBiasTensor(biasInfo);
1676 armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
1677 armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
1678 armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);
1679
1680 // Allocate and copy data
1681 AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
1682 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
1683 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
1684 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
1685
1686 AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
1687 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
1688 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
1689 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
1690
1691 AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
1692 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
1693 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
1694 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
1695
1696 // Setup queue descriptor
1697 data.m_InputToInputWeights = &inputToInputWeightsTensor;
1698 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1699 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1700 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1701
1702 data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1703 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1704 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1705 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1706
1707 data.m_InputGateBias = &inputGateBiasTensor;
1708 data.m_ForgetGateBias = &forgetGateBiasTensor;
1709 data.m_CellBias = &cellBiasTensor;
1710 data.m_OutputGateBias = &outputGateBiasTensor;
1711
1712 // Create workload and allocate tensor handles
1713 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantizedLstm(data, info);
1714 inputHandle->Allocate();
1715 outputStateInHandle->Allocate();
1716 cellStateInHandle->Allocate();
1717
1718 cellStateOutHandle->Allocate();
1719 outputHandle->Allocate();
1720
1721 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
1722 CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
1723 CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
1724
1725 workload->Execute();
1726
1727 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
1728
1729 return ret;
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001730}
1731
1732} // anonymous namespace
1733
1734#if defined(ARMNNREF_ENABLED)
1735
1736// The LSTM test units are run only for the reference backend at the moment
1737
1738void LstmUtilsZeroVectorTest()
1739{
1740 armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
1741 boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
1742 {2., 3., 3., 4.}));
1743
1744 boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
1745 {0., 0., 0., 0.}));
1746
1747 return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
1748}
1749
1750void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
1751{
1752 uint32_t batchSize = 2;
1753 uint32_t vecSize = 4;
1754 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1755 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1756 { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0
1757 0.9f, 1.0f, 1.1f, 1.2f })); //batch 1
1758
1759 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1760 { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0
1761 -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f })); //batch 1
1762
1763 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1764 vecSize, batchSize, expectedOutput);
1765}
1766
1767void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
1768{
1769 uint32_t batchSize = 2;
1770 uint32_t vecSize = 4;
1771 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1772 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1773 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1774 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
1775
1776 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1777 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1778 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
1779
1780 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1781 vecSize, batchSize, expectedOutput);
1782}
1783
1784void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
1785{
1786 uint32_t batchSize = 2;
1787 uint32_t vecSize = 4;
1788 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1789 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1790 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1791 0.1f, 0.2f, 0.3f, 0.4f })); //batch 1
1792
1793 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1794 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1795 -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1
1796
1797 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1798 vecSize, batchSize, expectedOutput);
1799}
1800
// Checks VectorBatchVectorCwiseProduct: each batch row is multiplied
// element-wise by the single vector (4 batches x 29 elements).
void LstmUtilsVectorBatchVectorCwiseProductTest()
{
    uint32_t batchSize = 4;
    uint32_t vecSize = 29;
    armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
        {   1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f, 10.1f,
        11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
        21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}));

    armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
        { /* batch 0 */
            1.1f,   2.2f,   3.3f,   4.4f,   5.5f,   6.6f,   7.7f,   8.8f,   9.9f,  10.1f,
        11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f,  20.2f,
        21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f,   0.0f,
          /* batch 1 */
           -1.1f,   -2.2f,   -3.3f,   -4.4f,   -5.5f,   -6.6f,   -7.7f,   -8.8f,   -9.9f, -10.1f,
        -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
        -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f,    0.0f,
          /* batch 2 */
            1.1f,   -2.2f,   3.3f,   -4.4f,   5.5f,   -6.6f,   7.7f,   -8.8f,   9.9f, -10.1f,
        11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
        21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f,   0.0f,
          /* batch 3 */
           -1.1f,   2.2f,   -3.3f,   4.4f,   -5.5f,   6.6f,   -7.7f,   8.8f,   -9.9f, 10.1f,
        -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
        -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f,    0.0f}));

    // Expect output[b][i] = vector[i] * batchVector[b][i] (pure element-wise
    // product, no accumulation — e.g. 1.21 = 1.1 * 1.1 in batch 0).
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
        { /* batch 0 */
             1.210000f,    4.840000f,   10.889999f,   19.360001f,   30.250000f,   43.559998f,
            59.289997f,   77.440002f,   98.009995f,  102.010010f,  123.432091f,  146.894394f,
           172.396896f,  199.939606f,  229.522491f,  261.145599f,  294.808899f,  330.512421f,
           368.256134f,  408.040039f,  449.864075f,  493.728363f,  539.632874f,  587.577576f,
           637.562500f,  689.587585f,  743.652954f,  799.758423f,    0.000000f,
          /* batch 1 */
            -1.210000f,   -4.840000f,  -10.889999f,  -19.360001f,  -30.250000f,  -43.559998f,
           -59.289997f,  -77.440002f,  -98.009995f, -102.010010f, -123.432091f, -146.894394f,
          -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
          -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
          -637.562500f, -689.587585f, -743.652954f, -799.758423f,    0.000000f,
          /* batch 2 */
             1.210000f,   -4.840000f,   10.889999f,  -19.360001f,   30.250000f,  -43.559998f,
            59.289997f,  -77.440002f,   98.009995f, -102.010010f,  123.432091f, -146.894394f,
           172.396896f, -199.939606f,  229.522491f, -261.145599f,  294.808899f, -330.512421f,
           368.256134f, -408.040039f,  449.864075f, -493.728363f,  539.632874f, -587.577576f,
           637.562500f, -689.587585f,  743.652954f, -799.758423f,    0.000000f,
          /* batch 3 */
            -1.210000f,    4.840000f,  -10.889999f,   19.360001f,  -30.250000f,   43.559998f,
           -59.289997f,   77.440002f,  -98.009995f,  102.010010f, -123.432091f,  146.894394f,
          -172.396896f,  199.939606f, -229.522491f,  261.145599f, -294.808899f,  330.512421f,
          -368.256134f,  408.040039f, -449.864075f,  493.728363f, -539.632874f,  587.577576f,
          -637.562500f,  689.587585f, -743.652954f,  799.758423f,    0.000000f}));

    return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
            vecSize, batchSize, expectedOutput);
}
1860
1861void LstmUtilsVectorBatchVectorAddTest()
1862{
1863 uint32_t batchSize = 2;
1864 uint32_t vecSize = 3;
1865 armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
1866 boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
1867 { 0.0f, -0.5f, 1.0f}));
1868
1869 armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
1870 boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1871 { 1.0f, 2.0f, 3.0f, //batch 0
1872 4.0f, 5.0f, 6.0f})); //batch 1
1873
1874 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1875 { 1.0f, 1.5f, 4.0f,
1876 4.0f, 4.5f, 7.0f}));
1877
1878 return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
1879 vecSize, batchSize, expectedOutput);
1880}
1881
1882#endif
1883
1884LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
1885 armnn::IWorkloadFactory& workloadFactory,
1886 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1887{
1888 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
1889 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1890 { 2., 3., 3., 4. }));
1891
1892 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
1893 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1894 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1895 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
1896 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
1897 workloadFactory, memoryManager, input, expectedOutput);
1898}
1899
// Float32 LSTM layer test: no CIFG, with peephole, with projection.
LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // 2 batches x 5 inputs each.
    armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
        {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
         0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));

    // 2 batches x 16 outputs each; golden values checked by the Impl.
    armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
        {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
         -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
         -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
         0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
         -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
         0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
         0.02168f}));
    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
        workloadFactory, memoryManager, input, expectedOutput);
}
1921
1922LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
1923 armnn::IWorkloadFactory& workloadFactory,
1924 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1925{
1926 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
1927 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1928 {2., 3., 3., 4.}));
1929
1930 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
1931 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1932 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1933 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
1934
1935 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
1936 workloadFactory, memoryManager, input, expectedOutput);
1937}
1938
1939LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
1940 armnn::IWorkloadFactory& workloadFactory,
1941 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1942{
1943 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
1944 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1945 {0.7f, 0.8f, 0.1f, 0.2f, 0.3f, //batch 0
1946 0.3f, 0.2f, 0.9f, 0.8f, 0.1f})); //batch 1
1947
1948 armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
1949 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1950 { 0.0244077f, 0.128027f, -0.00170918f, //batch 0
1951 -0.00692428f, 0.0848741f, 0.063445f})); //batch 1
1952 return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
1953 workloadFactory, memoryManager, input, expectedOutput);
1954}
1955
1956LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
1957 armnn::IWorkloadFactory& workloadFactory,
1958 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1959{
1960 const float qScale = 1.0f;
1961 const int32_t qOffset = 0;
1962
1963 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1964 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1965
1966 armnn::TensorInfo inputDesc({2, 2}, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001967 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(
1968 inputDesc,
1969 armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001970
1971 armnn::TensorInfo outputDesc({2, 4}, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001972 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(
1973 outputDesc,
1974 armnnUtils::QuantizedVector<int16_t>(
1975 {
1976 -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
1977 -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
1978 },
1979 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01001980
1981 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1982 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1983
1984}
1985
1986LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
1987 armnn::IWorkloadFactory& workloadFactory,
1988 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1989{
1990 const float qScale = 1.0f;
1991 const int32_t qOffset = 0;
1992
1993 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1994 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1995
1996 armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01001997 boost::multi_array<int16_t, 2> input =
1998 MakeTensor<int16_t, 2>(
1999 inputDesc,
2000 armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002001
2002 armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002003 boost::multi_array<int16_t, 2> expectedOutput =
2004 MakeTensor<int16_t, 2>(
2005 outputDesc,
2006 armnnUtils::QuantizedVector<int16_t>(
2007 {
2008 -0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
2009 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f
2010 },
2011 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002012
2013 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
2014 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2015}
2016
// QSymm16 LSTM layer test: no CIFG, with peephole, with projection.
LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
{
    // Quantization parameters applied to the float reference data below.
    const float qScale = 2.0f;
    const int32_t qOffset = 0;

    // QSymm16 activations with QAsymm8 constants (weights/biases).
    const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
    const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;

    // 2 batches x 5 inputs each.
    armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
    boost::multi_array<int16_t, 2> input =
        MakeTensor<int16_t, 2>(
            inputDesc,
            armnnUtils::QuantizedVector<int16_t>(
                {
                    0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
                    0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f
                },
                qScale, qOffset));

    // 2 batches x 16 outputs each.
    armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
    boost::multi_array<int16_t, 2> expectedOutput =
        MakeTensor<int16_t, 2>(
            outputDesc,
            armnnUtils::QuantizedVector<int16_t>(
                {
                    -0.00396806f,  0.02935200f, -0.00279226f,  0.01599770f,
                    -0.00835576f, -0.02117790f,  0.02835120f, -0.01145970f,
                     0.00907307f, -0.02440040f, -0.01521910f, -0.02590630f,
                     0.00914318f,  0.00415118f,  0.01714700f,  0.01342030f,
                    -0.01386900f,  0.02872680f, -0.00334693f,  0.00733398f,
                    -0.02879260f, -0.01869260f,  0.01936620f, -0.01154370f,
                     0.00422612f, -0.03452320f,  0.00223253f, -0.00957321f,
                     0.02106240f,  0.01333100f,  0.01509540f,  0.02168000f
                },
                qScale, qOffset));

    return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
        workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
}
2058
2059LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
2060 armnn::IWorkloadFactory& workloadFactory,
2061 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2062{
2063 const float qScale = 1.0f;
2064 const int32_t qOffset = 0;
2065
2066 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
2067
2068 armnn::TensorInfo inputDesc({2, 2}, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002069 boost::multi_array<int16_t , 2> input =
2070 MakeTensor<int16_t , 2>(inputDesc,
2071 armnnUtils::QuantizedVector<int16_t>({ 2.f, 3.f, 3.f, 4.f }, qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002072
2073 armnn::TensorInfo outputDesc({2, 4}, datatype);
Aron Virginas-Tar48623a02019-10-22 10:00:28 +01002074 boost::multi_array<int16_t, 2> expectedOutput =
2075 MakeTensor<int16_t, 2>(
2076 outputDesc,
2077 armnnUtils::QuantizedVector<int16_t>(
2078 {
2079 -0.02973187f, 0.12294730f, 0.20885126f, -0.15358765f,
2080 -0.01854220f, 0.11281417f, 0.24466537f, -0.18262920f
2081 },
2082 qScale, qOffset));
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01002083
2084 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2085 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
2086}
2087
2088//
2089// QuantizedLstm
2090//
2091
2092LayerTestResult<uint8_t, 2> QuantizedLstmTest(
2093 armnn::IWorkloadFactory& workloadFactory,
2094 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2095{
2096 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
2097 boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
2098 {166, 179, 50, 150}));
2099
2100 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
2101 boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
2102 {140, 151, 146, 112, 136, 156, 142, 112 }));
2103
2104 return QuantizedLstmTestImpl(workloadFactory, memoryManager, input, expectedOutput);
2105}