blob: c07f6232feea4294e8fe6ca58ebfc22c757d0b9e [file] [log] [blame]
telsoa01c577f2c2018-08-31 09:22:23 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beckecb56cd2018-09-05 12:52:57 +01003// SPDX-License-Identifier: MIT
telsoa01c577f2c2018-08-31 09:22:23 +01004//
telsoa01c577f2c2018-08-31 09:22:23 +01005
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01006#include "LstmTestImpl.hpp"
Aron Virginas-Tarc9cc8042018-11-01 16:15:57 +00007
telsoa01c577f2c2018-08-31 09:22:23 +01008#include <armnn/ArmNN.hpp>
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +01009
10#include <backendsCommon/CpuTensorHandle.hpp>
11
12#include <backendsCommon/test/QuantizeHelper.hpp>
13#include <backendsCommon/test/TensorCopyUtils.hpp>
14#include <backendsCommon/test/WorkloadTestUtils.hpp>
15
16#include <reference/workloads/Decoders.hpp>
17#include <reference/workloads/Encoders.hpp>
18#include <reference/workloads/LstmUtils.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010019
David Beckac42efd2018-09-26 17:41:13 +010020#include <test/TensorHelpers.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010021
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010022#include <boost/multi_array.hpp>
telsoa01c577f2c2018-08-31 09:22:23 +010023
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010024namespace
25{
Jan Eilers38e05bd2019-06-26 13:10:09 +010026
27template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
28void LstmUtilsVectorBatchVectorAddTestImpl(
29 boost::multi_array<float, 1>& vec,
30 boost::multi_array<float, 2>& batchVec,
31 uint32_t vSize,
32 uint32_t nBatch,
33 boost::multi_array<float, 2>& expectedOutput )
34{
35 float qScale = 0.0f;
36 int32_t qOffset = 0;
37 armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset );
38
39 // Make encoder and decoder
40 std::unique_ptr<armnn::Decoder<float>> vecDecoder = armnn::MakeDecoder<float>(tensorInfo, vec.data());
41 std::unique_ptr<armnn::Decoder<float>> batchVecDecoder = armnn::MakeDecoder<float>(tensorInfo, batchVec.data());
42 std::unique_ptr<armnn::Encoder<float>> batchVecEncoder = armnn::MakeEncoder<float>(tensorInfo, batchVec.data());
43
44 VectorBatchVectorAdd(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
45
46 // check shape and compare values
47 BOOST_TEST(CompareTensors(batchVec, expectedOutput));
48
49 // check if iterator is back at start position
50 batchVecEncoder->Set(1.0f);
51 BOOST_TEST(batchVec[0][0] == 1.0f);
52}
53
54template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
55void LstmUtilsZeroVectorTestImpl(
56 boost::multi_array<float, 1>& input,
57 uint32_t vSize,
Aron Virginas-Tar00d306e2019-08-28 18:08:46 +010058 boost::multi_array<float, 1>& expectedOutput)
59{
Jan Eilers38e05bd2019-06-26 13:10:09 +010060 float qScale = 0.0f;
61 int32_t qOffset = 0;
62
63 armnn::TensorInfo tensorInfo({vSize}, ArmnnType, qScale, qOffset );
64
65 // Make encoder for input
66 std::unique_ptr<armnn::Encoder<float>> outputEncoder = armnn::MakeEncoder<float>(tensorInfo, input.data());
67
68 // call ZeroVector
69 ZeroVector(*outputEncoder, vSize);
70
71 // check shape and compare values
72 BOOST_TEST(CompareTensors(input, expectedOutput));
73
74 // check if iterator is back at start position
75 outputEncoder->Set(1.0f);
76 BOOST_TEST(input[0] == 1.0f);
77
78}
79
Jan Eilers38e05bd2019-06-26 13:10:09 +010080template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
81void LstmUtilsMeanStddevNormalizationTestImpl(
82 boost::multi_array<float, 2>& input,
83 uint32_t vSize,
84 uint32_t nBatch,
85 boost::multi_array<float, 2>& expectedOutput)
86{
87 float qScale = 0.0f;
88 int32_t qOffset = 0;
89 armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset );
90
91 // Make encoder and decoder for input
92 std::unique_ptr<armnn::Decoder<float>> inputDecoder = armnn::MakeDecoder<float>(tensorInfo, input.data());
93 std::unique_ptr<armnn::Encoder<float>> outputEncoder = armnn::MakeEncoder<float>(tensorInfo, input.data());
94
95 MeanStddevNormalization(*inputDecoder, *outputEncoder, vSize, nBatch, 1e-8f);
96
97 // check shape and compare values
98 BOOST_TEST(CompareTensors(input, expectedOutput));
99
100 // check if iterator is back at start position
101 outputEncoder->Set(1.0f);
102 BOOST_TEST(input[0][0] == 1.0f);
103}
104
105template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
106void LstmUtilsVectorBatchVectorCwiseProductTestImpl(
107 boost::multi_array<float, 1>& vec,
108 boost::multi_array<float, 2>& batchVec,
109 uint32_t vSize,
110 uint32_t nBatch,
111 boost::multi_array<float, 2>& expectedOutput)
112{
113 float qScale = 0.0f;
114 int32_t qOffset = 0;
115 armnn::TensorInfo tensorInfo({nBatch, vSize}, ArmnnType, qScale, qOffset );
116
117 // Make encoder and decoder
118 std::unique_ptr<armnn::Decoder<float>> vecDecoder = armnn::MakeDecoder<float>(tensorInfo, vec.data());
119 std::unique_ptr<armnn::Decoder<float>> batchVecDecoder = armnn::MakeDecoder<float>(tensorInfo, batchVec.data());
120 std::unique_ptr<armnn::Encoder<float>> batchVecEncoder = armnn::MakeEncoder<float>(tensorInfo, batchVec.data());
121
122 VectorBatchVectorCwiseProduct(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
123
124 // check shape and compare values
125 BOOST_TEST(CompareTensors(batchVec, expectedOutput));
126
127 // check if iterator is back at start position
128 batchVecEncoder->Set(1.0f);
129 BOOST_TEST(batchVec[0][0] == 1.0f);
130}
131
132// Lstm Layer tests:
James Conroy9c3cae82019-08-01 16:01:48 +0100133// *********************************** //
// Runs a basic LSTM workload (CIFG, peephole and projection all disabled)
// through the given workload factory, with fixed hard-coded weights/biases,
// and returns the produced output together with the caller-supplied expected
// output in a LayerTestResult for comparison.
//
// Shapes come from the test data: input is [batchSize, inputSize] and
// outputExpected is [batchSize, outputSize]; with projection disabled the
// cell width (numUnits) equals outputSize.
//
// qScale/qOffset apply to the data tensors; constantDataType is the type of
// the weight/bias tensors (default Float32).
//
// NOTE(review): memoryManager is accepted for interface consistency with the
// other test impls but is not used in this function.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2>
LstmNoCifgNoPeepholeNoProjectionTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const boost::multi_array<T, 2>& input,
    const boost::multi_array<T, 2>& outputExpected,
    float qScale = 0.0f,
    int32_t qOffset = 0,
    armnn::DataType constantDataType = armnn::DataType::Float32)
{
    // Derive tensor dimensions from the supplied test data.
    unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
    unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
    unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
    // cellSize and outputSize have the same size when there is no projection.
    unsigned numUnits = outputSize;

    armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, ArmnnType, qScale, qOffset );
    armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, ArmnnType, qScale, qOffset);

    // Scratch buffer is [batchSize, numUnits * 4] when CIFG is disabled.
    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> ret(outputTensorInfo);

    // Input data plus zero-initialised initial cell/output state and outputs.
    std::vector<float> inputVector;
    inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
    auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);

    std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
    auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);

    std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
    auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);

    std::vector<float> scratchBufferVector(batchSize * numUnits * 4, 0.f);
    auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);

    std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
    auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);

    std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
    auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);

    std::vector<float> outputVector;
    outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
    ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);

    // Create backend tensor handles for every workload input and output.
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
            workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
            workloadFactory.CreateTensorHandle(outputStateInTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
            workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
            workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);


    armnn::LstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Registration order fixes the workload slot numbering:
    // inputs are {input, outputStateIn, cellStateIn}.
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());

    // Outputs are {scratchBuffer, outputStateOut, cellStateOut, output}.
    AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
    AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
    AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Constant-tensor infos: the names reflect element counts for this fixed
    // test case (numUnits = 4, inputSize = 2, outputSize = 4): tensorInfo4 is
    // {numUnits}, tensorInfo8 is {numUnits, 2}, tensorInfo16 is {numUnits, 4}.
    armnn::TensorInfo tensorInfo4({numUnits}, constantDataType , qScale, qOffset);
    armnn::TensorInfo tensorInfo8({numUnits, 2}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo16({numUnits, 4}, constantDataType, qScale, qOffset);

    // Fixed weight/bias values for the test network.
    auto inputToInputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.45018822f, -0.02338299f, -0.0870589f,
                                                                  -0.34550029f, 0.04266912f, -0.15680569f,
                                                                  -0.34856534f, 0.43890524f});

    auto inputToForgetWeights = MakeTensor<float, 2>(tensorInfo8, {0.09701663f, 0.20334584f, -0.50592935f,
                                                                   -0.31343272f, -0.40032279f, 0.44781327f,
                                                                   0.01387155f, -0.35593212f});

    auto inputToCellWeights = MakeTensor<float, 2>(tensorInfo8, {-0.50013041f, 0.1370284f, 0.11810488f, 0.2013163f,
                                                                 -0.20583314f, 0.44344562f, 0.22077113f,
                                                                 -0.29909778f});

    auto inputToOutputWeights = MakeTensor<float, 2>(tensorInfo8, {-0.25065863f, -0.28290087f, 0.04613829f,
                                                                   0.40525138f, 0.44272184f, 0.03897077f,
                                                                   -0.1556896f, 0.19487578f});

    auto recurrentToInputWeights = MakeTensor<float, 2>(tensorInfo16, {-0.0063535f, -0.2042388f, 0.31454784f,
                                                                       -0.35746509f, 0.28902304f, 0.08183324f,
                                                                       -0.16555229f, 0.02286911f, -0.13566875f,
                                                                       0.03034258f, 0.48091322f, -0.12528998f,
                                                                       0.24077177f, -0.51332325f, -0.33502164f,
                                                                       0.10629296f});

    auto recurrentToForgetWeights = MakeTensor<float, 2>(tensorInfo16, {-0.48684245f, -0.06655136f, 0.42224967f,
                                                                        0.2112639f, 0.27654213f, 0.20864892f,
                                                                        -0.07646349f, 0.45877004f, 0.00141793f,
                                                                        -0.14609534f, 0.36447752f, 0.09196436f,
                                                                        0.28053468f, 0.01560611f, -0.20127171f,
                                                                        -0.01140004f});

    auto recurrentToCellWeights = MakeTensor<float, 2>(tensorInfo16, {-0.3407414f, 0.24443203f, -0.2078532f,
                                                                      0.26320225f, 0.05695659f, -0.00123841f,
                                                                      -0.4744786f, -0.35869038f, -0.06418842f,
                                                                      -0.13502428f, -0.501764f, 0.22830659f,
                                                                      -0.46367589f, 0.26016325f, -0.03894562f,
                                                                      -0.16368064f});

    auto recurrentToOutputWeights = MakeTensor<float, 2>(tensorInfo16, {0.43385774f, -0.17194885f, 0.2718237f,
                                                                        0.09215671f, 0.24107647f, -0.39835793f,
                                                                        0.18212086f, 0.01301402f, 0.48572797f,
                                                                        -0.50656658f, 0.20047462f, -0.20607421f,
                                                                        -0.51818722f, -0.15390486f, 0.0468148f,
                                                                        0.39922136f});

    auto cellToInputWeights = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});

    auto inputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});

    // Forget-gate bias of 1.0 matches the common LSTM initialisation convention.
    auto forgetGateBias = MakeTensor<float, 1>(tensorInfo4, {1., 1., 1., 1.});

    auto cellBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});

    auto outputGateBias = MakeTensor<float, 1>(tensorInfo4, {0., 0., 0., 0.});

    // Scoped handles own the constant-tensor storage for the descriptor.
    armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo8);
    armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo8);
    armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo8);
    armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo8);
    armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo16);
    armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo16);
    armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo16);
    armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo16);
    armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);

    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);

    // Attach the constant tensors to the queue descriptor.
    // NOTE(review): cellToInputWeightsTensor is allocated and filled above but
    // never assigned to data.m_CellToInputWeights — peephole is disabled here,
    // so it appears to be dead setup; confirm before removing.
    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    // Flags to set test configuration
    data.m_Parameters.m_ActivationFunc = 4;
    data.m_Parameters.m_CifgEnabled = false;
    data.m_Parameters.m_PeepholeEnabled = false;
    data.m_Parameters.m_ProjectionEnabled = false;

    // Build the workload, allocate all handles, and upload the input data.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    scratchHandle->Allocate();
    outputStateOutHandle->Allocate();
    cellStateOutHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);

    workload->Execute();

    // Only the final output tensor is read back; the caller compares it with
    // ret.outputExpected.
    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    return ret;
}
336
Conor Kennedyb9971c92019-05-07 07:14:23 +0100337template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
338LayerTestResult<T, 2>
Matteo Martincigha65b7ae2018-11-14 12:39:55 +0000339LstmLayerNoCifgWithPeepholeWithProjectionTestImpl(armnn::IWorkloadFactory& workloadFactory,
340 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Conor Kennedyb9971c92019-05-07 07:14:23 +0100341 const boost::multi_array<T, 2>& input,
342 const boost::multi_array<T, 2>& outputExpected,
343 float qScale = 0.0f,
344 int32_t qOffset = 0,
345 armnn::DataType constantDataType = armnn::DataType::Float32)
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +0000346{
telsoa01c577f2c2018-08-31 09:22:23 +0100347 unsigned int batchSize = 2;
348 unsigned int outputSize = 16;
349 unsigned int inputSize = 5;
350 unsigned numUnits = 20;
351
Conor Kennedyb9971c92019-05-07 07:14:23 +0100352 armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, ArmnnType, qScale, qOffset);
353 armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, ArmnnType, qScale, qOffset);
354 armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, ArmnnType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +0100355
Matteo Martincigha65b7ae2018-11-14 12:39:55 +0000356 // Scratch buffer size without CIFG [batchSize, numUnits * 4]
Conor Kennedyb9971c92019-05-07 07:14:23 +0100357 armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, ArmnnType, qScale, qOffset);
358 armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
359 armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
360 armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +0100361
Conor Kennedyb9971c92019-05-07 07:14:23 +0100362 LayerTestResult<T, 2> ret(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +0100363
364 std::vector<float> inputVector;
365 inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
366 auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);
367
368 std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
369 auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);
370
371 std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
372 auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);
373
Matteo Martincigha65b7ae2018-11-14 12:39:55 +0000374 std::vector<float> scratchBufferVector(batchSize * numUnits * 4, 0.f);
telsoa01c577f2c2018-08-31 09:22:23 +0100375 auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
376
377 std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
378 auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
379
380 std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
381 auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
382
383 std::vector<float> outputVector;
384 outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
385 ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);
386
387 std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
388 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
389 workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
390 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
391 workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
392
393 std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
394 std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
395 workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
396 std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
397 workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
398 std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);
399
400 armnn::LstmQueueDescriptor data;
401 armnn::WorkloadInfo info;
402
403 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
404 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
405 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
David Beckac42efd2018-09-26 17:41:13 +0100406
telsoa01c577f2c2018-08-31 09:22:23 +0100407 AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
408 AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
409 AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
410 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
411
Conor Kennedyb9971c92019-05-07 07:14:23 +0100412 armnn::TensorInfo tensorInfo16({outputSize}, constantDataType, qScale, qOffset);
413 armnn::TensorInfo tensorInfo20({numUnits}, constantDataType, qScale, qOffset);
414 armnn::TensorInfo tensorInfo20x5({numUnits, inputSize}, constantDataType, qScale, qOffset);
415 armnn::TensorInfo tensorInfo20x16({numUnits, outputSize}, constantDataType, qScale, qOffset);
416 armnn::TensorInfo tensorInfo16x20({outputSize, numUnits}, constantDataType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +0100417
418 auto inputToInputWeights =
419 MakeTensor<float, 2>(tensorInfo20x5, {0.021393683f,0.06124551f, 0.046905167f,-0.014657677f,-0.03149463f,
420 0.09171803f, 0.14647801f,0.10797193f, -0.0057968358f,0.0019193048f,
421 -0.2726754f, 0.10154029f, -0.018539885f, 0.080349885f, -0.10262385f,
422 -0.022599787f,-0.09121155f, -0.008675967f, -0.045206103f,-0.0821282f,
423 -0.008045952f,0.015478081f, 0.055217247f, 0.038719587f, 0.044153627f,
424 -0.06453243f,0.05031825f, -0.046935108f, -0.008164439f, 0.014574226f,
425 -0.1671009f, -0.15519552f, -0.16819797f,-0.13971269f,-0.11953059f,
426 0.25005487f, -0.22790983f, 0.009855087f, -0.028140958f, -0.11200698f,
427 0.11295408f, -0.0035217577f, 0.054485075f, 0.05184695f, 0.064711206f,
428 0.10989193f, 0.11674786f, 0.03490607f, 0.07727357f, 0.11390585f,
429 -0.1863375f, -0.1034451f, -0.13945189f, -0.049401227f, -0.18767063f,
430 0.042483903f, 0.14233552f, 0.13832581f, 0.18350165f, 0.14545603f,
431 -0.028545704f,0.024939531f,0.050929718f,0.0076203286f,-0.0029723682f,
432 -0.042484224f, -0.11827596f, -0.09171104f, -0.10808628f,-0.16327988f,
433 -0.2273378f, -0.0993647f, -0.017155107f,0.0023917493f,0.049272764f,
434 0.0038534778f, 0.054764505f, 0.089753784f, 0.06947234f, 0.08014476f,
435 -0.04544234f, -0.0497073f,-0.07135631f, -0.048929106f,-0.004042012f,
436 -0.009284026f, 0.018042054f, 0.0036860977f,-0.07427302f, -0.11434604f,
437 -0.018995456f, 0.031487543f, 0.012834908f,0.019977754f,0.044256654f,
438 -0.39292613f, -0.18519334f, -0.11651281f,-0.06809892f, 0.011373677f
439 });
440
441 auto inputToForgetWeights =
442 MakeTensor<float, 2>(tensorInfo20x5, {-0.0018401089f, -0.004852237f,0.03698424f, 0.014181704f,0.028273236f,
443 -0.016726194f, -0.05249759f,-0.10204261f, 0.00861066f,-0.040979505f,
444 -0.009899187f,0.01923892f,-0.028177269f, -0.08535103f,-0.14585495f,
445 0.10662567f,-0.01909731f,-0.017883534f,-0.0047269356f,-0.045103323f,
446 0.0030784295f,0.076784775f,0.07463696f, 0.094531395f,0.0814421f,
447 -0.12257899f, -0.033945758f,-0.031303465f, 0.045630626f,0.06843887f,
448 -0.13492945f, -0.012480007f,-0.0811829f, -0.07224499f,-0.09628791f,
449 0.045100946f,0.0012300825f, 0.013964662f, 0.099372394f,0.02543059f,
450 0.06958324f, 0.034257296f, 0.0482646f, 0.06267997f,0.052625068f,
451 0.12784666f, 0.07077897f, 0.025725935f, 0.04165009f,0.07241905f,
452 0.018668644f, -0.037377294f,-0.06277783f,-0.08833636f,-0.040120605f,
453 -0.011405586f,-0.007808335f,-0.010301386f,-0.005102167f,0.027717464f,
454 0.05483423f, 0.11449111f, 0.11289652f,0.10939839f, 0.13396506f,
455 -0.08402166f,-0.01901462f, -0.044678304f,-0.07720565f,0.014350063f,
456 -0.11757958f, -0.0652038f, -0.08185733f,-0.076754324f,-0.092614375f,
457 0.10405491f, 0.052960336f, 0.035755895f,0.035839386f,-0.012540553f,
458 0.036881298f, 0.02913376f, 0.03420159f,0.05448447f,-0.054523353f,
459 0.02582715f, 0.02327355f, -0.011857179f,-0.0011980024f,-0.034641717f,
460 -0.026125094f,-0.17582615f,-0.15923657f,-0.27486774f,-0.0006143371f,
461 0.0001771948f, -8.470171e-05f, 0.02651807f,0.045790765f,0.06956496f
462 });
463
464 auto inputToCellWeights =
465 MakeTensor<float, 2>(tensorInfo20x5, {-0.04580283f, -0.09549462f, -0.032418985f, -0.06454633f,
466 -0.043528453f, 0.043018587f, -0.049152344f, -0.12418144f,
467 -0.078985475f, -0.07596889f, 0.019484362f, -0.11434962f,
468 -0.0074034138f, -0.06314844f, -0.092981495f, 0.0062155537f,
469 -0.025034338f, -0.0028890965f, 0.048929527f, 0.06235075f,
470 0.10665918f, -0.032036792f, -0.08505916f, -0.10843358f,
471 -0.13002433f, -0.036816437f, -0.02130134f, -0.016518239f,
472 0.0047691227f, -0.0025825808f, 0.066017866f, 0.029991534f,
473 -0.10652836f, -0.1037554f, -0.13056071f, -0.03266643f,
474 -0.033702414f, -0.006473424f, -0.04611692f, 0.014419339f,
475 -0.025174323f, 0.0396852f, 0.081777506f, 0.06157468f,
476 0.10210095f, -0.009658194f, 0.046511717f, 0.03603906f,
477 0.0069369148f, 0.015960095f, -0.06507666f, 0.09551598f,
478 0.053568836f, 0.06408714f, 0.12835667f, -0.008714329f,
479 -0.20211966f, -0.12093674f, 0.029450472f, 0.2849013f,
480 -0.029227901f, 0.1164364f, -0.08560263f, 0.09941786f,
481 -0.036999565f, -0.028842626f, -0.0033637602f, -0.017012902f,
482 -0.09720865f, -0.11193351f, -0.029155117f, -0.017936034f,
483 -0.009768936f, -0.04223324f, -0.036159635f, 0.06505112f,
484 -0.021742892f, -0.023377212f, -0.07221364f, -0.06430552f,
485 0.05453865f, 0.091149814f, 0.06387331f, 0.007518393f,
486 0.055960953f, 0.069779344f, 0.046411168f, 0.10509911f,
487 0.07463894f, 0.0075130584f, 0.012850982f, 0.04555431f,
488 0.056955688f, 0.06555285f, 0.050801456f, -0.009862683f,
489 0.00826772f, -0.026555609f, -0.0073611983f, -0.0014897042f
490 });
491
492 auto inputToOutputWeights =
493 MakeTensor<float, 2>(tensorInfo20x5, {-0.0998932f, -0.07201956f, -0.052803773f,-0.15629593f,-0.15001918f,
494 -0.07650751f,0.02359855f, -0.075155355f, -0.08037709f, -0.15093534f,
495 0.029517552f, -0.04751393f, 0.010350531f,-0.02664851f, -0.016839722f,
496 -0.023121163f, 0.0077019283f, 0.012851257f, -0.05040649f,-0.0129761f,
497 -0.021737747f,-0.038305793f,-0.06870586f, -0.01481247f,-0.001285394f,
498 0.10124236f, 0.083122835f, 0.053313006f,-0.062235646f,-0.075637154f,
499 -0.027833903f, 0.029774971f, 0.1130802f, 0.09218906f, 0.09506135f,
500 -0.086665764f,-0.037162706f,-0.038880914f,-0.035832845f,-0.014481564f,
501 -0.09825003f,-0.12048569f,-0.097665586f,-0.05287633f, -0.0964047f,
502 -0.11366429f, 0.035777505f, 0.13568819f, 0.052451383f,0.050649304f,
503 0.05798951f, -0.021852335f,-0.099848844f,0.014740475f,-0.078897946f,
504 0.04974699f, 0.014160473f, 0.06973932f, 0.04964942f, 0.033364646f,
505 0.08190124f, 0.025535367f, 0.050893165f, 0.048514254f,0.06945813f,
506 -0.078907564f,-0.06707616f, -0.11844508f, -0.09986688f,-0.07509403f,
507 0.06263226f, 0.14925587f, 0.20188436f, 0.12098451f,0.14639415f,
508 0.0015017595f, -0.014267382f, -0.03417257f,0.012711468f,0.0028300495f,
509 -0.024758482f, -0.05098548f,-0.0821182f, 0.014225672f, 0.021544158f,
510 0.08949725f, 0.07505268f, -0.0020780868f, 0.04908258f,0.06476295f,
511 -0.022907063f,0.027562456f,0.040185735f, 0.019567577f,-0.015598739f,
512 -0.049097303f, -0.017121866f, -0.083368234f,-0.02332002f,-0.0840956f
513 });
514
515 auto inputGateBias =
516 MakeTensor<float, 1>(tensorInfo20, {0.02234832f, 0.14757581f, 0.18176508f, 0.10380666f, 0.053110216f,
517 -0.06928846f, -0.13942584f, -0.11816189f, 0.19483899f, 0.03652339f,
518 -0.10250295f, 0.036714908f, -0.18426876f, 0.036065217f, 0.21810818f,
519 0.02383196f, -0.043370757f, 0.08690144f, -0.04444982f, 0.00030581196f
520 });
521
522 auto forgetGateBias =
523 MakeTensor<float, 1>(tensorInfo20, {0.035185695f, -0.042891346f, -0.03032477f, 0.23027696f,
524 0.11098921f, 0.15378423f, 0.09263801f, 0.09790885f,
525 0.09508917f, 0.061199076f, 0.07665568f, -0.015443159f,
526 -0.03499149f, 0.046190713f, 0.08895977f, 0.10899629f,
527 0.40694186f, 0.06030037f, 0.012413437f, -0.06108739f
528 });
529
530 auto cellBias =
531 MakeTensor<float, 1>(tensorInfo20, {-0.024379363f, 0.0055531194f, 0.23377132f, 0.033463873f,
532 -0.1483596f, -0.10639995f, -0.091433935f, 0.058573797f,
533 -0.06809782f, -0.07889636f, -0.043246906f, -0.09829136f,
534 -0.4279842f, 0.034901652f, 0.18797937f, 0.0075234566f,
535 0.016178843f, 0.1749513f, 0.13975595f, 0.92058027f
536 });
537
538 auto outputGateBias =
539 MakeTensor<float, 1>(tensorInfo20, {0.046159424f, -0.0012809046f, 0.03563469f, 0.12648113f, 0.027195795f,
540 0.35373217f, -0.018957434f, 0.008907322f, -0.0762701f, 0.12018895f,
541 0.04216877f, 0.0022856654f, 0.040952638f, 0.3147856f, 0.08225149f,
542 -0.057416286f, -0.14995944f, -0.008040261f, 0.13208859f, 0.029760877f
543 });
544
545 auto recurrentToInputWeights =
546 MakeTensor<float, 2>(tensorInfo20x16, {-0.001374326f, -0.078856036f, 0.10672688f, 0.029162422f,
547 -0.11585556f, 0.02557986f, -0.13446963f, -0.035785314f,
548 -0.01244275f, 0.025961924f, -0.02337298f, -0.044228926f,
549 -0.055839065f, -0.046598054f, -0.010546039f, -0.06900766f,
550 0.027239809f, 0.022582639f, -0.013296484f, -0.05459212f,
551 0.08981f, -0.045407712f, 0.08682226f, -0.06867011f,
552 -0.14390695f, -0.02916037f, 0.000996957f, 0.091420636f,
553 0.14283475f, -0.07390571f, -0.06402044f, 0.062524505f,
554 -0.093129106f, 0.04860203f, -0.08364217f, -0.08119002f,
555 0.009352075f, 0.22920375f, 0.0016303885f, 0.11583097f,
556 -0.13732095f, 0.012405723f, -0.07551853f, 0.06343048f,
557 0.12162708f, -0.031923793f, -0.014335606f, 0.01790974f,
558 -0.10650317f, -0.0724401f, 0.08554849f, -0.05727212f,
559 0.06556731f, -0.042729504f, -0.043227166f, 0.011683251f,
560 -0.013082158f, -0.029302018f, -0.010899579f, -0.062036745f,
561 -0.022509435f, -0.00964907f, -0.01567329f, 0.04260106f,
562 -0.07787477f, -0.11576462f, 0.017356863f, 0.048673786f,
563 -0.017577527f, -0.05527947f, -0.082487635f, -0.040137455f,
564 -0.10820036f, -0.04666372f, 0.022746278f, -0.07851417f,
565 0.01068115f, 0.032956902f, 0.022433773f, 0.0026891115f,
566 0.08944216f, -0.0685835f, 0.010513544f, 0.07228705f,
567 0.02032331f, -0.059686817f, -0.0005566496f, -0.086984694f,
568 0.040414046f, -0.1380399f, 0.094208956f, -0.05722982f,
569 0.012092817f, -0.04989123f, -0.086576f, -0.003399834f,
570 -0.04696032f, -0.045747425f, 0.10091314f, 0.048676282f,
571 -0.029037097f, 0.031399418f, -0.0040285117f, 0.047237843f,
572 0.09504992f, 0.041799378f, -0.049185462f, -0.031518843f,
573 -0.10516937f, 0.026374253f, 0.10058866f, -0.0033195973f,
574 -0.041975245f, 0.0073591834f, 0.0033782164f, -0.004325073f,
575 -0.10167381f, 0.042500053f, -0.01447153f, 0.06464186f,
576 -0.017142897f, 0.03312627f, 0.009205989f, 0.024138335f,
577 -0.011337001f, 0.035530265f, -0.010912711f, 0.0706555f,
578 -0.005894094f, 0.051841937f, -0.1401738f, -0.02351249f,
579 0.0365468f, 0.07590991f, 0.08838724f, 0.021681072f,
580 -0.10086113f, 0.019608743f, -0.06195883f, 0.077335775f,
581 0.023646897f, -0.095322326f, 0.02233014f, 0.09756986f,
582 -0.048691444f, -0.009579111f, 0.07595467f, 0.11480546f,
583 -0.09801813f, 0.019894179f, 0.08502348f, 0.004032281f,
584 0.037211012f, 0.068537936f, -0.048005626f, -0.091520436f,
585 -0.028379958f, -0.01556313f, 0.06554592f, -0.045599163f,
586 -0.01672207f, -0.020169014f, -0.011877351f, -0.20212261f,
587 0.010889619f, 0.0047078193f, 0.038385306f, 0.08540671f,
588 -0.017140968f, -0.0035865551f, 0.016678626f, 0.005633034f,
589 0.015963363f, 0.00871737f, 0.060130805f, 0.028611384f,
590 0.10109069f, -0.015060172f, -0.07894427f, 0.06401885f,
591 0.011584063f, -0.024466386f, 0.0047652307f, -0.09041358f,
592 0.030737216f, -0.0046374933f, 0.14215417f, -0.11823516f,
593 0.019899689f, 0.006106124f, -0.027092824f, 0.0786356f,
594 0.05052217f, -0.058925f, -0.011402121f, -0.024987547f,
595 -0.0013661642f, -0.06832946f, -0.015667673f, -0.1083353f,
596 -0.00096863037f, -0.06988685f, -0.053350925f, -0.027275559f,
597 -0.033664223f, -0.07978348f, -0.025200296f, -0.017207067f,
598 -0.058403496f, -0.055697463f, 0.005798788f, 0.12965427f,
599 -0.062582195f, 0.0013350133f, -0.10482091f, 0.0379771f,
600 0.072521195f, -0.0029455067f, -0.13797039f, -0.03628521f,
601 0.013806405f, -0.017858358f, -0.01008298f, -0.07700066f,
602 -0.017081132f, 0.019358726f, 0.0027079724f, 0.004635139f,
603 0.062634714f, -0.02338735f, -0.039547626f, -0.02050681f,
604 0.03385117f, -0.083611414f, 0.002862572f, -0.09421313f,
605 0.058618143f, -0.08598433f, 0.00972939f, 0.023867095f,
606 -0.053934585f, -0.023203006f, 0.07452513f, -0.048767887f,
607 -0.07314807f, -0.056307215f, -0.10433547f, -0.06440842f,
608 0.04328182f, 0.04389765f, -0.020006588f, -0.09076438f,
609 -0.11652589f, -0.021705797f, 0.03345259f, -0.010329105f,
610 -0.025767034f, 0.013057034f, -0.07316461f, -0.10145612f,
611 0.06358255f, 0.18531723f, 0.07759293f, 0.12006465f,
612 0.1305557f, 0.058638252f, -0.03393652f, 0.09622831f,
613 -0.16253184f, -2.4580743e-06f, 0.079869635f, -0.070196845f,
614 -0.005644518f, 0.06857898f, -0.12598175f, -0.035084512f,
615 0.03156317f, -0.12794146f, -0.031963028f, 0.04692781f,
616 0.030070418f, 0.0071660685f, -0.095516115f, -0.004643372f,
617 0.040170413f, -0.062104587f, -0.0037324072f, 0.0554317f,
618 0.08184801f, -0.019164372f, 0.06791302f, 0.034257166f,
619 -0.10307039f, 0.021943003f, 0.046745934f, 0.0790918f,
620 -0.0265588f, -0.007824208f, 0.042546265f, -0.00977924f,
621 -0.0002440307f, -0.017384544f, -0.017990116f, 0.12252321f,
622 -0.014512694f, -0.08251313f, 0.08861942f, 0.13589665f,
623 0.026351685f, 0.012641483f, 0.07466548f, 0.044301085f,
624 -0.045414884f, -0.051112458f, 0.03444247f, -0.08502782f,
625 -0.04106223f, -0.028126027f, 0.028473156f, 0.10467447f
626 });
627
628 auto recurrentToForgetWeights =
629 MakeTensor<float, 2>(tensorInfo20x16, {-0.057784554f, -0.026057621f, -0.068447545f, -0.022581743f,
630 0.14811787f, 0.10826372f, 0.09471067f, 0.03987225f,
631 -0.0039523416f, 0.00030638507f, 0.053185795f, 0.10572994f,
632 0.08414449f, -0.022036452f, -0.00066928595f, -0.09203576f,
633 0.032950465f, -0.10985798f, -0.023809856f, 0.0021431844f,
634 -0.02196096f, -0.00326074f, 0.00058621005f, -0.074678116f,
635 -0.06193199f, 0.055729095f, 0.03736828f, 0.020123724f,
636 0.061878487f, -0.04729229f, 0.034919553f, -0.07585433f,
637 -0.04421272f, -0.044019096f, 0.085488975f, 0.04058006f,
638 -0.06890133f, -0.030951202f, -0.024628663f, -0.07672815f,
639 0.034293607f, 0.08556707f, -0.05293577f, -0.033561368f,
640 -0.04899627f, 0.0241671f, 0.015736353f, -0.095442444f,
641 -0.029564252f, 0.016493602f, -0.035026584f, 0.022337519f,
642 -0.026871363f, 0.004780428f, 0.0077918363f, -0.03601621f,
643 0.016435321f, -0.03263031f, -0.09543275f, -0.047392778f,
644 0.013454138f, 0.028934088f, 0.01685226f, -0.086110644f,
645 -0.046250615f, -0.01847454f, 0.047608484f, 0.07339695f,
646 0.034546845f, -0.04881143f, 0.009128804f, -0.08802852f,
647 0.03761666f, 0.008096139f, -0.014454086f, 0.014361001f,
648 -0.023502491f, -0.0011840804f, -0.07607001f, 0.001856849f,
649 -0.06509276f, -0.006021153f, -0.08570962f, -0.1451793f,
650 0.060212336f, 0.055259194f, 0.06974018f, 0.049454916f,
651 -0.027794661f, -0.08077226f, -0.016179763f, 0.1169753f,
652 0.17213494f, -0.0056326236f, -0.053934924f, -0.0124349f,
653 -0.11520337f, 0.05409887f, 0.088759385f, 0.0019655675f,
654 0.0042065294f, 0.03881498f, 0.019844765f, 0.041858196f,
655 -0.05695512f, 0.047233116f, 0.038937137f, -0.06542224f,
656 0.014429736f, -0.09719407f, 0.13908425f, -0.05379757f,
657 0.012321099f, 0.082840554f, -0.029899208f, 0.044217527f,
658 0.059855383f, 0.07711018f, -0.045319796f, 0.0948846f,
659 -0.011724666f, -0.0033288454f, -0.033542685f, -0.04764985f,
660 -0.13873616f, 0.040668588f, 0.034832682f, -0.015319203f,
661 -0.018715994f, 0.046002675f, 0.0599172f, -0.043107376f,
662 0.0294216f, -0.002314414f, -0.022424703f, 0.0030315618f,
663 0.0014641669f, 0.0029166266f, -0.11878115f, 0.013738511f,
664 0.12375372f, -0.0006038222f, 0.029104086f, 0.087442465f,
665 0.052958444f, 0.07558703f, 0.04817258f, 0.044462286f,
666 -0.015213451f, -0.08783778f, -0.0561384f, -0.003008196f,
667 0.047060397f, -0.002058388f, 0.03429439f, -0.018839769f,
668 0.024734668f, 0.024614193f, -0.042046934f, 0.09597743f,
669 -0.0043254104f, 0.04320769f, 0.0064070094f, -0.0019131786f,
670 -0.02558259f, -0.022822596f, -0.023273505f, -0.02464396f,
671 -0.10991725f, -0.006240552f, 0.0074488563f, 0.024044557f,
672 0.04383914f, -0.046476185f, 0.028658995f, 0.060410924f,
673 0.050786525f, 0.009452605f, -0.0073054377f, -0.024810238f,
674 0.0052906186f, 0.0066939713f, -0.0020913032f, 0.014515517f,
675 0.015898481f, 0.021362653f, -0.030262267f, 0.016587038f,
676 -0.011442813f, 0.041154444f, -0.007631438f, -0.03423484f,
677 -0.010977775f, 0.036152758f, 0.0066366293f, 0.11915515f,
678 0.02318443f, -0.041350313f, 0.021485701f, -0.10906167f,
679 -0.028218046f, -0.00954771f, 0.020531068f, -0.11995105f,
680 -0.03672871f, 0.024019798f, 0.014255957f, -0.05221243f,
681 -0.00661567f, -0.04630967f, 0.033188973f, 0.10107534f,
682 -0.014027541f, 0.030796422f, -0.10270911f, -0.035999842f,
683 0.15443139f, 0.07684145f, 0.036571592f, -0.035900835f,
684 -0.0034699554f, 0.06209149f, 0.015920248f, -0.031122351f,
685 -0.03858649f, 0.01849943f, 0.13872518f, 0.01503974f,
686 0.069941424f, -0.06948533f, -0.0088794185f, 0.061282158f,
687 -0.047401894f, 0.03100163f, -0.041533746f, -0.10430945f,
688 0.044574402f, -0.01425562f, -0.024290353f, 0.034563623f,
689 0.05866852f, 0.023947537f, -0.09445152f, 0.035450947f,
690 0.02247216f, -0.0042998926f, 0.061146557f, -0.10250651f,
691 0.020881841f, -0.06747029f, 0.10062043f, -0.0023941975f,
692 0.03532124f, -0.016341697f, 0.09685456f, -0.016764693f,
693 0.051808182f, 0.05875331f, -0.04536488f, 0.001626336f,
694 -0.028892258f, -0.01048663f, -0.009793449f, -0.017093895f,
695 0.010987891f, 0.02357273f, -0.00010856845f, 0.0099760275f,
696 -0.001845119f, -0.03551521f, 0.0018358806f, 0.05763657f,
697 -0.01769146f, 0.040995963f, 0.02235177f, -0.060430344f,
698 0.11475477f, -0.023854522f, 0.10071741f, 0.0686208f,
699 -0.014250481f, 0.034261297f, 0.047418304f, 0.08562733f,
700 -0.030519066f, 0.0060542435f, 0.014653856f, -0.038836084f,
701 0.04096551f, 0.032249358f, -0.08355519f, -0.026823482f,
702 0.056386515f, -0.010401743f, -0.028396193f, 0.08507674f,
703 0.014410365f, 0.020995233f, 0.17040324f, 0.11511526f,
704 0.02459721f, 0.0066619175f, 0.025853224f, -0.023133837f,
705 -0.081302024f, 0.017264642f, -0.009585969f, 0.09491168f,
706 -0.051313367f, 0.054532815f, -0.014298593f, 0.10657464f,
707 0.007076659f, 0.10964551f, 0.0409152f, 0.008275321f,
708 -0.07283536f, 0.07937492f, 0.04192024f, -0.1075027f
709 });
710
711 auto recurrentToCellWeights =
712 MakeTensor<float, 2>(tensorInfo20x16, {-0.037322544f, 0.018592842f, 0.0056175636f, -0.06253426f,
713 0.055647098f, -0.05713207f, -0.05626563f, 0.005559383f,
714 0.03375411f, -0.025757805f, -0.088049285f, 0.06017052f,
715 -0.06570978f, 0.007384076f, 0.035123326f, -0.07920549f,
716 0.053676967f, 0.044480428f, -0.07663568f, 0.0071805613f,
717 0.08089997f, 0.05143358f, 0.038261272f, 0.03339287f,
718 -0.027673481f, 0.044746667f, 0.028349208f, 0.020090483f,
719 -0.019443132f, -0.030755889f, -0.0040000007f, 0.04465846f,
720 -0.021585021f, 0.0031670958f, 0.0053199246f, -0.056117613f,
721 -0.10893326f, 0.076739706f, -0.08509834f, -0.027997585f,
722 0.037871376f, 0.01449768f, -0.09002357f, -0.06111149f,
723 -0.046195522f, 0.0422062f, -0.005683705f, -0.1253618f,
724 -0.012925729f, -0.04890792f, 0.06985068f, 0.037654128f,
725 0.03398274f, -0.004781977f, 0.007032333f, -0.031787455f,
726 0.010868644f, -0.031489216f, 0.09525667f, 0.013939797f,
727 0.0058680447f, 0.0167067f, 0.02668468f, -0.04797466f,
728 -0.048885044f, -0.12722108f, 0.035304096f, 0.06554885f,
729 0.00972396f, -0.039238118f, -0.05159735f, -0.11329045f,
730 0.1613692f, -0.03750952f, 0.06529313f, -0.071974665f,
731 -0.11769596f, 0.015524369f, -0.0013754242f, -0.12446318f,
732 0.02786344f, -0.014179351f, 0.005264273f, 0.14376344f,
733 0.015983658f, 0.03406988f, -0.06939408f, 0.040699873f,
734 0.02111075f, 0.09669095f, 0.041345075f, -0.08316494f,
735 -0.07684199f, -0.045768797f, 0.032298047f, -0.041805092f,
736 0.0119405f, 0.0061010392f, 0.12652606f, 0.0064572375f,
737 -0.024950314f, 0.11574242f, 0.04508852f, -0.04335324f,
738 0.06760663f, -0.027437469f, 0.07216407f, 0.06977076f,
739 -0.05438599f, 0.034033038f, -0.028602652f, 0.05346137f,
740 0.043184172f, -0.037189785f, 0.10420091f, 0.00882477f,
741 -0.054019816f, -0.074273005f, -0.030617684f, -0.0028467078f,
742 0.024302477f, -0.0038869337f, 0.005332455f, 0.0013399826f,
743 0.04361412f, -0.007001822f, 0.09631092f, -0.06702025f,
744 -0.042049985f, -0.035070654f, -0.04103342f, -0.10273396f,
745 0.0544271f, 0.037184782f, -0.13150354f, -0.0058036847f,
746 -0.008264958f, 0.042035464f, 0.05891794f, 0.029673764f,
747 0.0063542654f, 0.044788733f, 0.054816857f, 0.062257513f,
748 -0.00093483756f, 0.048938446f, -0.004952862f, -0.007730018f,
749 -0.04043371f, -0.017094059f, 0.07229206f, -0.023670016f,
750 -0.052195564f, -0.025616996f, -0.01520939f, 0.045104615f,
751 -0.007376126f, 0.003533447f, 0.006570588f, 0.056037236f,
752 0.12436656f, 0.051817212f, 0.028532185f, -0.08686856f,
753 0.11868599f, 0.07663395f, -0.07323171f, 0.03463402f,
754 -0.050708205f, -0.04458982f, -0.11590894f, 0.021273347f,
755 0.1251325f, -0.15313013f, -0.12224372f, 0.17228661f,
756 0.023029093f, 0.086124025f, 0.006445803f, -0.03496501f,
757 0.028332196f, 0.04449512f, -0.042436164f, -0.026587414f,
758 -0.006041347f, -0.09292539f, -0.05678812f, 0.03897832f,
759 0.09465633f, 0.008115513f, -0.02171956f, 0.08304309f,
760 0.071401566f, 0.019622514f, 0.032163795f, -0.004167056f,
761 0.02295182f, 0.030739572f, 0.056506045f, 0.004612461f,
762 0.06524936f, 0.059999723f, 0.046395954f, -0.0045512207f,
763 -0.1335546f, -0.030136576f, 0.11584653f, -0.014678886f,
764 0.0020118146f, -0.09688814f, -0.0790206f, 0.039770417f,
765 -0.0329582f, 0.07922767f, 0.029322514f, 0.026405897f,
766 0.04207835f, -0.07073373f, 0.063781224f, 0.0859677f,
767 -0.10925287f, -0.07011058f, 0.048005477f, 0.03438226f,
768 -0.09606514f, -0.006669445f, -0.043381985f, 0.04240257f,
769 -0.06955775f, -0.06769346f, 0.043903265f, -0.026784198f,
770 -0.017840602f, 0.024307009f, -0.040079936f, -0.019946516f,
771 0.045318738f, -0.12233574f, 0.026170589f, 0.0074471775f,
772 0.15978073f, 0.10185836f, 0.10298046f, -0.015476589f,
773 -0.039390966f, -0.072174534f, 0.0739445f, -0.1211869f,
774 -0.0347889f, -0.07943156f, 0.014809798f, -0.12412325f,
775 -0.0030663363f, 0.039695457f, 0.0647603f, -0.08291318f,
776 -0.018529687f, -0.004423833f, 0.0037507233f, 0.084633216f,
777 -0.01514876f, -0.056505352f, -0.012800942f, -0.06994386f,
778 0.012962922f, -0.031234352f, 0.07029052f, 0.016418684f,
779 0.03618972f, 0.055686004f, -0.08663945f, -0.017404709f,
780 -0.054761406f, 0.029065743f, 0.052404847f, 0.020238016f,
781 0.0048197987f, -0.0214882f, 0.07078733f, 0.013016777f,
782 0.06262858f, 0.009184685f, 0.020785125f, -0.043904778f,
783 -0.0270329f, -0.03299152f, -0.060088247f, -0.015162964f,
784 -0.001828936f, 0.12642565f, -0.056757294f, 0.013586685f,
785 0.09232601f, -0.035886683f, 0.06000002f, 0.05229691f,
786 -0.052580316f, -0.082029596f, -0.010794592f, 0.012947712f,
787 -0.036429964f, -0.085508935f, -0.13127148f, -0.017744139f,
788 0.031502828f, 0.036232427f, -0.031581745f, 0.023051167f,
789 -0.05325106f, -0.03421577f, 0.028793324f, -0.034633752f,
790 -0.009881397f, -0.043551125f, -0.018609839f, 0.0019097115f,
791 -0.008799762f, 0.056595087f, 0.0022273948f, 0.055752404f
792 });
793
794 auto recurrentToOutputWeights =
795 MakeTensor<float, 2>(tensorInfo20x16, {0.025825322f, -0.05813119f, 0.09495884f,-0.045984812f, -0.01255415f,
796 -0.0026479573f,-0.08196161f,-0.054914974f,-0.0046604523f,
797 -0.029587349f, -0.044576716f, -0.07480124f, -0.082868785f,
798 0.023254942f, 0.027502948f, -0.0039728214f, -0.08683098f,
799 -0.08116779f, -0.014675607f, -0.037924774f, -0.023314456f,
800 -0.007401714f, -0.09255757f, 0.029460307f, -0.08829125f,
801 -0.005139627f, -0.08989442f, -0.0555066f, 0.13596267f,
802 -0.025062224f, -0.048351806f, -0.03850004f, 0.07266485f,
803 -0.022414139f, 0.05940088f, 0.075114764f, 0.09597592f,
804 -0.010211725f, -0.0049794707f, -0.011523867f, -0.025980417f,
805 0.072999895f, 0.11091378f, -0.081685916f, 0.014416728f,
806 0.043229222f, 0.034178585f, -0.07530371f, 0.035837382f,
807 -0.085607f, -0.007721233f, -0.03287832f, -0.043848954f,
808 -0.06404588f, -0.06632928f, -0.073643476f, 0.008214239f,
809 -0.045984086f, 0.039764922f, 0.03474462f, 0.060612556f,
810 -0.080590084f, 0.049127717f, 0.04151091f, -0.030063879f,
811 0.008801774f, -0.023021035f, -0.019558564f, 0.05158114f,
812 -0.010947698f, -0.011825728f, 0.0075720972f, 0.0699727f,
813 -0.0039981045f, 0.069350146f, 0.08799282f, 0.016156472f,
814 0.035502106f, 0.11695009f, 0.006217345f, 0.13392477f,
815 -0.037875112f, 0.025745004f, 0.08940699f, -0.00924166f,
816 0.0046702605f, -0.036598757f, -0.08811812f, 0.10522024f,
817 -0.032441203f, 0.008176899f, -0.04454919f, 0.07058152f,
818 0.0067963637f, 0.039206743f, 0.03259838f, 0.03725492f,
819 -0.09515802f, 0.013326398f, -0.052055415f, -0.025676316f,
820 0.03198509f, -0.015951829f, -0.058556724f, 0.036879618f,
821 0.043357447f, 0.028362012f, -0.05908629f, 0.0059240665f,
822 -0.04995891f, -0.019187413f,0.0276265f, -0.01628143f, 0.0025863599f,
823 0.08800015f, 0.035250366f, -0.022165963f, -0.07328642f,
824 -0.009415526f, -0.07455109f, 0.11690406f, 0.0363299f,
825 0.07411125f, 0.042103454f, -0.009660886f, 0.019076364f,
826 0.018299393f, -0.046004917f, 0.08891175f,0.0431396f, -0.026327137f,
827 -0.051502608f, 0.08979574f, -0.051670972f, 0.04940282f,
828 -0.07491107f, -0.021240504f, 0.022596184f, -0.034280192f,
829 0.060163025f, -0.058211457f, -0.051837247f, -0.01349775f,
830 -0.04639988f, -0.035936575f, -0.011681591f, 0.064818054f,
831 0.0073146066f, -0.021745546f, -0.043124277f, -0.06471268f,
832 -0.07053354f, -0.029321948f, -0.05330136f, 0.016933719f,
833 -0.053782392f, 0.13747959f, -0.1361751f, -0.11569455f,
834 0.0033329215f, 0.05693899f, -0.053219706f, 0.063698f,
835 0.07977434f, -0.07924483f, 0.06936997f, 0.0034815092f,
836 -0.007305279f, -0.037325785f, -0.07251102f, -0.033633437f,
837 -0.08677009f, 0.091591336f, -0.14165086f, 0.021752775f,
838 0.019683983f, 0.0011612234f, -0.058154266f, 0.049996935f,
839 0.0288841f, -0.0024567875f, -0.14345716f, 0.010955264f,-0.10234828f,
840 0.1183656f, -0.0010731248f, -0.023590032f,-0.072285876f,-0.0724771f,
841 -0.026382286f, -0.0014920527f, 0.042667855f, 0.0018776858f,
842 0.02986552f, 0.009814309f, 0.0733756f, 0.12289186f,
843 0.018043943f, -0.0458958f, 0.049412545f, 0.033632483f,
844 0.05495232f, 0.036686596f, -0.013781798f, -0.010036754f,
845 0.02576849f, -0.08307328f, 0.010112348f, 0.042521734f,
846 -0.05869831f, -0.071689695f, 0.03876447f, -0.13275425f, -0.0352966f,
847 -0.023077697f, 0.10285965f, 0.084736146f, 0.15568255f,
848 -0.00040734606f, 0.027835453f, -0.10292561f, -0.032401145f,
849 0.10053256f, -0.026142767f, -0.08271222f, -0.0030240538f,
850 -0.016368777f, 0.1070414f, 0.042672627f, 0.013456989f,
851 -0.0437609f, -0.022309763f, 0.11576483f, 0.04108048f,
852 0.061026827f, -0.0190714f, -0.0869359f, 0.037901703f, 0.0610107f,
853 0.07202949f, 0.01675338f, 0.086139716f, -0.08795751f,
854 -0.014898893f, -0.023771819f, -0.01965048f, 0.007955471f,
855 -0.043740474f, 0.03346837f, -0.10549954f, 0.090567775f,
856 0.042013682f, -0.03176985f, 0.12569028f, -0.02421228f,
857 -0.029526481f, 0.023851605f, 0.031539805f, 0.05292009f,
858 -0.02344001f, -0.07811758f, -0.08834428f, 0.10094801f,
859 0.16594367f, -0.06861939f, -0.021256343f, -0.041093912f,
860 -0.06669611f, 0.035498552f, 0.021757556f, -0.09302526f,
861 -0.015403468f, -0.06614931f, -0.051798206f, -0.013874718f,
862 0.03630673f, 0.010412845f, -0.08077351f, 0.046185967f,
863 0.0035662893f, 0.03541868f, -0.094149634f, -0.034814864f,
864 0.003128424f, -0.020674974f, -0.03944324f, -0.008110165f,
865 -0.11113267f, 0.08484226f, 0.043586485f, 0.040582247f,
866 0.0968012f, -0.065249965f, -0.028036479f, 0.0050708856f,
867 0.0017462453f, 0.0326779f, 0.041296225f, 0.09164146f,
868 -0.047743853f, -0.015952192f, -0.034451712f, 0.084197424f,
869 -0.05347844f, -0.11768019f, 0.085926116f, -0.08251791f,
870 -0.045081906f, 0.0948852f, 0.068401024f, 0.024856757f,
871 0.06978981f, -0.057309967f, -0.012775832f, -0.0032452994f,
872 0.01977615f, -0.041040014f, -0.024264973f,0.063464895f, 0.05431621f
873 });
874
875 auto cellToInputWeights =
876 MakeTensor<float, 1>(tensorInfo20, {0.040369894f, 0.030746894f, 0.24704495f, 0.018586371f, -0.037586458f,
877 -0.15312155f, -0.11812848f, -0.11465643f, 0.20259799f, 0.11418174f,
878 -0.10116027f, -0.011334949f, 0.12411352f, -0.076769054f,-0.052169047f,
879 0.21198851f, -0.38871562f, -0.09061183f, -0.09683246f, -0.21929175f
880 });
881
882
883 auto cellToForgetWeights =
884 MakeTensor<float, 1>(tensorInfo20, {-0.01998659f,-0.15568835f,-0.24248174f, -0.012770197f, 0.041331276f,
885 -0.072311886f, -0.052123554f,-0.0066330447f,-0.043891653f,0.036225766f,
886 -0.047248036f, 0.021479502f,0.033189066f, 0.11952997f, -0.020432774f,
887 0.64658105f, -0.06650122f, -0.03467612f, 0.095340036f, 0.23647355f
888 });
889
890 auto cellToOutputWeights =
891 MakeTensor<float, 1>(tensorInfo20, {0.08286371f, -0.08261836f, -0.51210177f, 0.002913762f, 0.17764764f,
892 -0.5495371f, -0.08460716f, -0.24552552f, 0.030037103f, 0.04123544f,
893 -0.11940523f, 0.007358328f, 0.1890978f, 0.4833202f, -0.34441817f,
894 0.36312827f, -0.26375428f, 0.1457655f, -0.19724406f, 0.15548733f
895 });
896
897 auto projectionWeights =
898 MakeTensor<float, 2>(tensorInfo16x20,
899 {-0.009802181f, 0.09401916f, 0.0717386f, -0.13895074f, 0.09641832f,
900 0.060420845f, 0.08539281f, 0.054285463f, 0.061395317f, 0.034448683f,
901 -0.042991187f, 0.019801661f, -0.16840284f, -0.015726732f, -0.23041931f,
902 -0.024478018f, -0.10959692f, -0.013875541f, 0.18600968f, -0.061274476f,
903 0.0138165f, -0.08160894f, -0.07661644f, 0.032372914f, 0.16169067f,
904 0.22465782f, -0.03993472f, -0.004017731f, 0.08633481f, -0.28869787f,
905 0.08682067f, 0.17240396f, 0.014975425f, 0.056431185f, 0.031037588f,
906 0.16702051f, 0.0077946745f, 0.15140012f, 0.29405436f, 0.120285f,
907 -0.188994f, -0.027265169f, 0.043389652f, -0.022061434f, 0.014777949f,
908 -0.20203483f, 0.094781205f, 0.19100232f, 0.13987629f, -0.036132768f,
909 -0.06426278f, -0.05108664f, 0.13221376f, 0.009441198f, -0.16715929f,
910 0.15859416f, -0.040437475f, 0.050779544f, -0.022187516f, 0.012166504f,
911 0.027685808f, -0.07675938f, -0.0055694645f, -0.09444123f, 0.0046453946f,
912 0.050794356f, 0.10770313f, -0.20790008f, -0.07149004f, -0.11425117f,
913 0.008225835f, -0.035802525f, 0.14374903f, 0.15262283f, 0.048710253f,
914 0.1847461f, -0.007487823f, 0.11000021f, -0.09542012f, 0.22619456f,
915 -0.029149994f, 0.08527916f, 0.009043713f, 0.0042746216f, 0.016261552f,
916 0.022461696f, 0.12689082f, -0.043589946f, -0.12035478f, -0.08361797f,
917 -0.050666027f, -0.1248618f, -0.1275799f, -0.071875185f, 0.07377272f,
918 0.09944291f, -0.18897448f, -0.1593054f, -0.06526116f, -0.040107165f,
919 -0.004618631f, -0.067624845f, -0.007576253f, 0.10727444f, 0.041546922f,
920 -0.20424393f, 0.06907816f, 0.050412357f, 0.00724631f, 0.039827548f,
921 0.12449835f, 0.10747581f, 0.13708383f, 0.09134148f, -0.12617786f,
922 -0.06428341f, 0.09956831f, 0.1208086f, -0.14676677f, -0.0727722f,
923 0.1126304f, 0.010139365f, 0.015571211f, -0.038128063f, 0.022913318f,
924 -0.042050496f, 0.16842307f, -0.060597885f, 0.10531834f, -0.06411776f,
925 -0.07451711f, -0.03410368f, -0.13393489f, 0.06534304f, 0.003620307f,
926 0.04490757f, 0.05970546f, 0.05197996f, 0.02839995f, 0.10434969f,
927 -0.013699693f, -0.028353551f, -0.07260381f, 0.047201227f, -0.024575593f,
928 -0.036445823f, 0.07155557f, 0.009672501f, -0.02328883f, 0.009533515f,
929 -0.03606021f, -0.07421458f, -0.028082801f, -0.2678904f, -0.13221288f,
930 0.18419984f, -0.13012612f, -0.014588381f, -0.035059117f, -0.04824723f,
931 0.07830115f, -0.056184657f, 0.03277091f, 0.025466874f, 0.14494097f,
932 -0.12522776f, -0.098633975f, -0.10766018f, -0.08317623f, 0.08594209f,
933 0.07749552f, 0.039474737f, 0.1776665f, -0.07409566f, -0.0477268f,
934 0.29323658f, 0.10801441f, 0.1154011f, 0.013952499f, 0.10739139f,
935 0.10708251f, -0.051456142f, 0.0074137426f, -0.10430189f, 0.10034707f,
936 0.045594677f, 0.0635285f, -0.0715442f, -0.089667566f, -0.10811871f,
937 0.00026344223f, 0.08298446f, -0.009525053f, 0.006585689f, -0.24567553f,
938 -0.09450807f, 0.09648481f, 0.026996298f, -0.06419476f, -0.04752702f,
939 -0.11063944f, -0.23441927f, -0.17608605f, -0.052156363f, 0.067035615f,
940 0.19271925f, -0.0032889997f, -0.043264326f, 0.09663576f, -0.057112187f,
941 -0.10100678f, 0.0628376f, 0.04447668f, 0.017961001f, -0.10094388f,
942 -0.10190601f, 0.18335468f, 0.10494553f, -0.052095775f, -0.0026118709f,
943 0.10539724f, -0.04383912f, -0.042349473f, 0.08438151f, -0.1947263f,
944 0.02251204f, 0.11216432f, -0.10307853f, 0.17351969f, -0.039091777f,
945 0.08066188f, -0.00561982f, 0.12633002f, 0.11335965f, -0.0088127935f,
946 -0.019777594f, 0.06864014f, -0.059751723f, 0.016233567f, -0.06894641f,
947 -0.28651384f, -0.004228674f, 0.019708522f, -0.16305895f, -0.07468996f,
948 -0.0855457f, 0.099339016f, -0.07580735f, -0.13775392f, 0.08434318f,
949 0.08330512f, -0.12131499f, 0.031935584f, 0.09180414f, -0.08876437f,
950 -0.08049874f, 0.008753825f, 0.03498998f, 0.030215185f, 0.03907079f,
951 0.089751154f, 0.029194152f, -0.03337423f, -0.019092513f, 0.04331237f,
952 0.04299654f, -0.036394123f, -0.12915532f, 0.09793732f, 0.07512415f,
953 -0.11319543f, -0.032502122f, 0.15661901f, 0.07671967f, -0.005491124f,
954 -0.19379048f, -0.218606f, 0.21448623f, 0.017840758f, 0.1416943f,
955 -0.07051762f, 0.19488361f, 0.02664691f, -0.18104725f, -0.09334311f,
956 0.15026465f, -0.15493552f, -0.057762887f, -0.11604192f, -0.262013f,
957 -0.01391798f, 0.012185008f, 0.11156489f, -0.07483202f, 0.06693364f,
958 -0.26151478f, 0.046425626f, 0.036540434f, -0.16435726f, 0.17338543f,
959 -0.21401681f, -0.11385144f, -0.08283257f, -0.069031075f, 0.030635102f,
960 0.010969227f, 0.11109743f, 0.010919218f, 0.027526086f, 0.13519906f,
961 0.01891392f, -0.046839405f, -0.040167913f, 0.017953383f, -0.09700955f,
962 0.0061885654f, -0.07000971f, 0.026893595f, -0.038844477f, 0.14543656f
963 });
964
965 std::vector<float> projectionBiasVector(outputSize, 0.f);
966 auto projectionBias = MakeTensor<float,1>(tensorInfo16, projectionBiasVector);
967
968 armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo20x5);
969 armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo20x5);
970 armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo20x5);
971 armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo20x5);
972 armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo20x16);
973 armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo20x16);
974 armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo20x16);
975 armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo20x16);
976 armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo20);
977 armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo20);
978 armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo20);
979 armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo20);
980 armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo20);
981 armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo20);
982 armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo20);
983 armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo16x20);
984 armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo16);
985
986 AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
987 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
988 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
989 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
990 AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
991 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
992 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
993 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
994 AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
995 AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
996 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
997 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
998 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
999 AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
1000 AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
1001 AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
1002 AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);
1003
1004 data.m_InputToInputWeights = &inputToInputWeightsTensor;
1005 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1006 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1007 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1008 data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
1009 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1010 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1011 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1012 data.m_CellToInputWeights = &cellToInputWeightsTensor;
1013 data.m_InputGateBias = &inputGateBiasTensor;
1014 data.m_ForgetGateBias = &forgetGateBiasTensor;
1015 data.m_CellBias = &cellBiasTensor;
1016 data.m_OutputGateBias = &outputGateBiasTensor;
1017 data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1018 data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1019 data.m_ProjectionWeights = &projectionWeightsTensor;
1020 data.m_ProjectionBias = &projectionBiasTensor;
1021
1022 // Flags to set test configuration
1023 data.m_Parameters.m_ActivationFunc = 4;
1024 data.m_Parameters.m_CifgEnabled = false;
1025 data.m_Parameters.m_PeepholeEnabled = true;
1026 data.m_Parameters.m_ProjectionEnabled = true;
1027
1028
1029 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
1030 inputHandle->Allocate();
1031 outputStateInHandle->Allocate();
1032 cellStateInHandle->Allocate();
1033
1034 scratchHandle->Allocate();
1035 outputStateOutHandle->Allocate();
1036 cellStateOutHandle->Allocate();
1037 outputHandle->Allocate();
1038
1039 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
1040 CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
1041 CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
1042
telsoa01c577f2c2018-08-31 09:22:23 +01001043 workload->Execute();
1044
1045 CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());
1046
1047 return ret;
1048
1049}
1050
Conor Kennedyb9971c92019-05-07 07:14:23 +01001051template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
1052LayerTestResult<T, 2> LstmLayerWithCifgWithPeepholeNoProjectionTestImpl(
Aron Virginas-Tar5caf9072018-11-14 18:35:18 +00001053 armnn::IWorkloadFactory& workloadFactory,
1054 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
Conor Kennedyb9971c92019-05-07 07:14:23 +01001055 const boost::multi_array<T, 2>& input,
1056 const boost::multi_array<T, 2>& outputExpected,
1057 float qScale = 0.0f,
1058 int32_t qOffset = 0,
1059 armnn::DataType constantDataType = armnn::DataType::Float32)
telsoa01c577f2c2018-08-31 09:22:23 +01001060{
1061 bool cifgEnabled = true;
1062 bool peepholeEnabled = true;
1063 bool projectionEnabled = false;
1064 // These are not the input and the output of Lstm yet
1065 unsigned int batchSize = boost::numeric_cast<unsigned int>(input.shape()[0]);
1066 unsigned int inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
1067
1068 unsigned int outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);
1069
1070 const unsigned int cellSize = outputSize;
1071
1072 // Decide the shape of all input tensors
Conor Kennedyb9971c92019-05-07 07:14:23 +01001073 armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, ArmnnType, qScale, qOffset); // change to ArmnnType
1074 armnn::TensorInfo outputStateInTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
1075 armnn::TensorInfo cellStateInTensorInfo({batchSize, cellSize}, ArmnnType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +01001076
Matteo Martincigha65b7ae2018-11-14 12:39:55 +00001077 unsigned int scratchBufferSize = cifgEnabled ? cellSize * 3 : cellSize * 4;
Conor Kennedyb9971c92019-05-07 07:14:23 +01001078 armnn::TensorInfo scratchBufferTensorInfo({batchSize, scratchBufferSize}, ArmnnType, qScale, qOffset);
1079 armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
1080 armnn::TensorInfo cellStateOutTensorInfo({batchSize, cellSize}, ArmnnType, qScale, qOffset);
1081 armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +01001082
1083 // List of inputs
1084 std::vector<float> inputData;
1085 inputData.assign(input.data(), input.data() + batchSize*inputSize);
1086 auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputData);
1087
1088 std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
1089 auto outputStateInTensor = MakeTensor<float, 2>(outputStateInTensorInfo, outputStateInVector);
1090
1091 std::vector<float> cellStateInVector(batchSize * cellSize, 0.f);
1092 auto cellStateInTensor = MakeTensor<float, 2>(cellStateInTensorInfo, cellStateInVector);
1093
1094
1095 // Prepare all the weights in the descriptor for LSTM
1096 armnn::LstmQueueDescriptor data;
Conor Kennedyb9971c92019-05-07 07:14:23 +01001097 armnn::TensorInfo tensorInfoInput({cellSize, inputSize}, constantDataType, qScale, qOffset);
1098 armnn::TensorInfo tensorInfoOutput({cellSize, outputSize}, constantDataType, qScale, qOffset);
1099 armnn::TensorInfo tensorInfoNumUnits({cellSize}, constantDataType, qScale, qOffset);
telsoa01c577f2c2018-08-31 09:22:23 +01001100
1101 auto inputToCellWeights = MakeTensor<float, 2>(tensorInfoInput,
1102 {-0.49770179f, -0.27711356f, -0.09624726f, 0.05100781f,
1103 0.04717243f, 0.48944736f, -0.38535351f,
1104 -0.17212132f});
1105 auto inputToForgetWeights = MakeTensor<float, 2>(tensorInfoInput,
1106 {-0.55291498f, -0.42866567f, 0.13056988f,
1107 -0.3633365f, -0.22755712f, 0.28253698f, 0.24407166f,
1108 0.33826375f});
1109 auto inputToOutputWeights = MakeTensor<float, 2>(tensorInfoInput,
1110 {0.10725588f, -0.02335852f, -0.55932593f,
1111 -0.09426838f, -0.44257352f, 0.54939759f,
1112 0.01533556f, 0.42751634f});
1113 auto cellBias = MakeTensor<float, 1>(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f});
1114 auto forgetGateBias = MakeTensor<float, 1>(tensorInfoNumUnits, {1.f, 1.f, 1.f, 1.f});
1115 auto outputGateBias = MakeTensor<float, 1>(tensorInfoNumUnits, {0.f, 0.f, 0.f, 0.f});
1116
1117 auto recurrentToCellWeights = MakeTensor<float, 2>(tensorInfoOutput,
1118 {0.54066205f, -0.32668582f, -0.43562764f, -0.56094903f, 0.42957711f,
1119 0.01841056f, -0.32764608f, -0.33027974f, -0.10826075f, 0.20675004f,
1120 0.19069612f, -0.03026325f, -0.54532051f, 0.33003211f, 0.44901288f,
1121 0.21193194f});
1122 auto recurrentToForgetWeights = MakeTensor<float, 2>(tensorInfoOutput,
1123 {-0.13832897f, -0.0515101f, -0.2359007f, -0.16661474f, -0.14340827f,
1124 0.36986142f, 0.23414481f, 0.55899f, 0.10798943f, -0.41174671f, 0.17751795f,
1125 -0.34484994f, -0.35874045f, -0.11352962f, 0.27268326f, 0.54058349f});
1126
1127 auto recurrentToOutputWeights = MakeTensor<float, 2>(tensorInfoOutput,
1128 {0.41613156f, 0.42610586f, -0.16495961f, -0.5663873f, 0.30579174f, -0.05115908f,
1129 -0.33941799f, 0.23364776f, 0.11178309f, 0.09481031f, -0.26424935f, 0.46261835f,
1130 0.50248802f, 0.26114327f, -0.43736315f, 0.33149987f});
1131
1132 auto cellToForgetWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
1133 {0.47485286f, -0.51955009f, -0.24458408f, 0.31544167f});
1134 auto cellToOutputWeights = MakeTensor<float, 1>(tensorInfoNumUnits,
1135 {-0.17135078f, 0.82760304f, 0.85573703f, -0.77109635f});
1136
1137 armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfoInput);
1138 armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfoInput);
1139 armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfoInput);
1140
1141 armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfoNumUnits);
1142 armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfoNumUnits);
1143 armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfoNumUnits);
1144
1145 armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfoOutput);
1146 armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfoOutput);
1147 armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfoOutput);
1148
1149
1150 armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfoNumUnits);
1151 armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfoNumUnits);
1152
1153 AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
1154 AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
1155 AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
1156
1157 AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
1158 AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
1159 AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
1160
1161 AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
1162 AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
1163 AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
1164
1165 AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
1166 AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
1167
1168
1169 data.m_InputToCellWeights = &inputToCellWeightsTensor;
1170 data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
1171 data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
1172
1173 data.m_CellBias = &cellBiasTensor;
1174 data.m_ForgetGateBias = &forgetGateBiasTensor;
1175 data.m_OutputGateBias = &outputGateBiasTensor;
1176
1177 data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
1178 data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
1179 data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
1180
1181 data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
1182 data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
1183
1184 // other parameters for the descriptor
1185 data.m_Parameters.m_CifgEnabled = cifgEnabled;
1186 data.m_Parameters.m_ProjectionEnabled = projectionEnabled;
1187 data.m_Parameters.m_PeepholeEnabled = peepholeEnabled;
1188
1189 data.m_Parameters.m_ActivationFunc = 4;
1190 data.m_Parameters.m_ClippingThresProj = 0.0;
1191 data.m_Parameters.m_ClippingThresCell = 0.0;
1192
1193
1194 // List of outputs
1195 std::vector<float> scratchBufferVector(batchSize * scratchBufferSize, 0.f);
1196 auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);
Conor Kennedyb9971c92019-05-07 07:14:23 +01001197 LayerTestResult<T, 2> ret0(scratchBufferTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001198
1199 // Output state for a certain time step
1200 std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
1201 auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);
Conor Kennedyb9971c92019-05-07 07:14:23 +01001202 LayerTestResult<T, 2> ret1(outputStateOutTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001203
1204 // Cell state for a certain time step
1205 std::vector<float> cellStateOutVector(batchSize * cellSize, 0.f);
1206 auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);
Conor Kennedyb9971c92019-05-07 07:14:23 +01001207 LayerTestResult<T, 2> ret2(cellStateOutTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001208
1209 // Output for a certain time step
1210 std::vector<float> outputVector(batchSize * outputSize, 0.f);
1211 auto outputTensor = MakeTensor<float, 2>(outputTensorInfo, outputVector);
1212 std::vector<float> outputData;
1213 outputData.assign(outputExpected.data(), outputExpected.data() + batchSize*outputSize);
Conor Kennedyb9971c92019-05-07 07:14:23 +01001214 LayerTestResult<T, 2> ret3(outputTensorInfo);
telsoa01c577f2c2018-08-31 09:22:23 +01001215 ret3.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputData);
1216
1217 // Prepare the inputs and outputs for the workload
1218 std::unique_ptr<armnn::ITensorHandle> inputHandle =
1219 workloadFactory.CreateTensorHandle(inputTensorInfo);
1220 std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
1221 workloadFactory.CreateTensorHandle(outputStateInTensorInfo);
1222 std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
1223 workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
1224
1225 std::unique_ptr<armnn::ITensorHandle> scratchBufferHandle =
1226 workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
1227 std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
1228 workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
1229 std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
1230 workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
1231 std::unique_ptr<armnn::ITensorHandle> outputHandle =
1232 workloadFactory.CreateTensorHandle(outputTensorInfo);
1233
1234 armnn::WorkloadInfo info;
1235 AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
1236 AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
1237 AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());
1238
1239 AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchBufferHandle.get());
1240 AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
1241 AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
1242 AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());
1243
1244 std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
1245
1246
1247 inputHandle->Allocate();
1248 outputStateInHandle->Allocate();
1249 cellStateInHandle->Allocate();
1250
1251 scratchBufferHandle->Allocate();
1252 outputStateOutHandle->Allocate();
1253 cellStateOutHandle->Allocate();
1254 outputHandle->Allocate();
1255
1256
1257 CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
1258 CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
1259 CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);
1260
1261 CopyDataToITensorHandle(scratchBufferHandle.get(), &scratchBufferTensor[0][0]);
1262 CopyDataToITensorHandle(outputStateOutHandle.get(), &outputStateOutTensor[0][0]);
1263 CopyDataToITensorHandle(cellStateOutHandle.get(), &cellStateOutTensor[0][0]);
1264
telsoa01c577f2c2018-08-31 09:22:23 +01001265 workload->Execute();
1266
1267 CopyDataFromITensorHandle(&ret0.output[0][0], scratchBufferHandle.get());
1268 CopyDataFromITensorHandle(&ret1.output[0][0], outputStateOutHandle.get());
1269 CopyDataFromITensorHandle(&ret2.output[0][0], cellStateOutHandle.get());
1270 CopyDataFromITensorHandle(&ret3.output[0][0], outputHandle.get());
1271
1272 return ret3;
1273}
Jan Eilers38e05bd2019-06-26 13:10:09 +01001274
// Test implementation for an LSTM layer configured with:
//   - CIFG disabled (separate input gate with its own weights/bias)
//   - peephole connections enabled (cell-to-gate weight vectors)
//   - projection enabled (projection weights + zero projection bias)
//   - layer normalization enabled (per-gate layer-norm weight vectors)
// Builds an LstmQueueDescriptor with hard-coded constant weights, runs a single
// time step through the given workload factory, and returns the actual vs.
// expected output for comparison by the caller.
//
// Template parameters:
//   ArmnnType - data type of the variable (input/state/output) tensors.
//   T         - C++ element type resolved from ArmnnType.
// Parameters:
//   workloadFactory  - backend factory used to create tensor handles and the workload.
//   memoryManager    - backend memory manager. NOTE(review): not referenced in this
//                      body — presumably kept for signature uniformity with other
//                      test impls; confirm before removing.
//   input            - [batchSize, inputSize] input values for one time step.
//   outputExpected   - [batchSize, outputSize] expected LSTM output.
//   qScale/qOffset   - quantization parameters applied to every TensorInfo
//                      (0.0f/0 for float runs).
//   constantDataType - data type used for the constant (weight/bias) tensors.
template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
LayerTestResult<T, 2>
LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl(armnn::IWorkloadFactory& workloadFactory,
        const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
        const boost::multi_array<T, 2>& input,
        const boost::multi_array<T, 2>& outputExpected,
        float qScale = 0.0f,
        int32_t qOffset = 0,
        armnn::DataType constantDataType = armnn::DataType::Float32)
{
    // Fixed problem dimensions for this test case.
    unsigned int batchSize = 2;
    unsigned int outputSize = 3;
    unsigned int inputSize = 5;
    unsigned numUnits = 4;

    // Shapes of the three workload inputs: input, previous output state, previous cell state.
    armnn::TensorInfo inputTensorInfo({batchSize , inputSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo cellStateInTensorInfo({batchSize , numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateInTensorInfo({batchSize , outputSize}, ArmnnType, qScale, qOffset);

    // Scratch buffer size without CIFG [batchSize, numUnits * 4]
    armnn::TensorInfo scratchBufferTensorInfo({batchSize, numUnits * 4}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo cellStateOutTensorInfo({batchSize, numUnits}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputStateOutTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);
    armnn::TensorInfo outputTensorInfo({batchSize, outputSize}, ArmnnType, qScale, qOffset);

    LayerTestResult<T, 2> ret(outputTensorInfo);

    // Copy caller-supplied input into a tensor; all state inputs start zeroed.
    std::vector<float> inputVector;
    inputVector.assign(input.data(), input.data() + (batchSize * inputSize));
    auto inputTensor = MakeTensor<float,2>(inputTensorInfo, inputVector);

    std::vector<float> cellStateInVector(batchSize * numUnits, 0.f);
    auto cellStateInTensor = MakeTensor<float,2>(cellStateInTensorInfo, cellStateInVector);

    std::vector<float> outputStateInVector(batchSize * outputSize, 0.f);
    auto outputStateInTensor = MakeTensor<float,2>(outputStateInTensorInfo, outputStateInVector);

    std::vector<float> scratchBufferVector(batchSize * numUnits * 4, 0.f);
    auto scratchBufferTensor = MakeTensor<float,2>(scratchBufferTensorInfo, scratchBufferVector);

    std::vector<float> outputStateOutVector(batchSize * outputSize, 0.f);
    auto outputStateOutTensor = MakeTensor<float,2>(outputStateOutTensorInfo, outputStateOutVector);

    std::vector<float> cellStateOutVector(batchSize * numUnits, 0.f);
    auto cellStateOutTensor = MakeTensor<float,2>(cellStateOutTensorInfo, cellStateOutVector);

    // Expected output provided by the caller.
    std::vector<float> outputVector;
    outputVector.assign(outputExpected.data(), outputExpected.data() + (batchSize * outputSize));
    ret.outputExpected = MakeTensor<float, 2>(outputTensorInfo, outputVector);

    // Create backend tensor handles for all workload inputs and outputs.
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
            workloadFactory.CreateTensorHandle(cellStateInTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
            workloadFactory.CreateTensorHandle(outputStateInTensorInfo);

    std::unique_ptr<armnn::ITensorHandle> scratchHandle = workloadFactory.CreateTensorHandle(scratchBufferTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateOutHandle =
            workloadFactory.CreateTensorHandle(outputStateOutTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
            workloadFactory.CreateTensorHandle(cellStateOutTensorInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputTensorInfo);

    armnn::LstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Wire inputs/outputs into the workload in the order the LSTM workload expects:
    // inputs:  input, output-state-in, cell-state-in
    // outputs: scratch buffer, output-state-out, cell-state-out, output
    AddInputToWorkload(data, info, inputTensorInfo, inputHandle.get());
    AddInputToWorkload(data, info, outputStateInTensorInfo, outputStateInHandle.get());
    AddInputToWorkload(data, info, cellStateInTensorInfo, cellStateInHandle.get());

    AddOutputToWorkload(data, info, scratchBufferTensorInfo, scratchHandle.get());
    AddOutputToWorkload(data, info, outputStateOutTensorInfo, outputStateOutHandle.get());
    AddOutputToWorkload(data, info, cellStateOutTensorInfo, cellStateOutHandle.get());
    AddOutputToWorkload(data, info, outputTensorInfo, outputHandle.get());

    // Constant-tensor shapes, named by their dimensions
    // (3 = outputSize, 4 = numUnits, 5 = inputSize).
    armnn::TensorInfo tensorInfo3({outputSize}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4({numUnits}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4x5({numUnits, inputSize}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo4x3({numUnits, outputSize}, constantDataType, qScale, qOffset);
    armnn::TensorInfo tensorInfo3x4({outputSize, numUnits}, constantDataType, qScale, qOffset);

    // Hard-coded constant weights and biases for the test.
    auto inputToInputWeights =
            MakeTensor<float, 2>(tensorInfo4x5, { 0.5f,  0.6f,  0.7f, -0.8f, -0.9f,
                                                  0.1f,  0.2f,  0.3f, -0.4f,  0.5f,
                                                 -0.8f,  0.7f, -0.6f,  0.5f, -0.4f,
                                                 -0.5f, -0.4f, -0.3f, -0.2f, -0.1f}); //{numUnits, inputSize}

    auto inputToForgetWeights =
            MakeTensor<float, 2>(tensorInfo4x5, {-0.6f, -0.1f,  0.3f,  0.2f,  0.9f,
                                                 -0.5f, -0.2f, -0.4f,  0.3f, -0.8f,
                                                 -0.4f,  0.3f, -0.5f, -0.4f, -0.6f,
                                                  0.3f, -0.4f, -0.6f, -0.5f, -0.5f}); //{numUnits, inputSize}

    auto inputToCellWeights =
            MakeTensor<float, 2>(tensorInfo4x5, {-0.4f, -0.3f, -0.2f, -0.1f, -0.5f,
                                                  0.5f, -0.2f, -0.3f, -0.2f, -0.6f,
                                                  0.6f, -0.1f, -0.4f, -0.3f, -0.7f,
                                                  0.7f, -0.9f, -0.5f,  0.8f,  0.6f}); //{numUnits, inputSize}

    auto inputToOutputWeights =
            MakeTensor<float, 2>(tensorInfo4x5, {-0.8f, -0.4f, -0.2f, -0.9f, -0.1f,
                                                 -0.7f,  0.3f, -0.3f, -0.8f, -0.2f,
                                                  0.6f, -0.2f,  0.4f, -0.7f, -0.3f,
                                                 -0.5f,  0.1f,  0.5f, -0.6f, -0.4f}); //{numUnits, inputSize}

    auto inputGateBias =
            MakeTensor<float, 1>(tensorInfo4, {0.03f, 0.15f, 0.22f, 0.38f}); //{numUnits}

    auto forgetGateBias =
            MakeTensor<float, 1>(tensorInfo4, {0.1f, -0.3f, -0.2f, 0.1f}); //{numUnits}

    auto cellBias =
            MakeTensor<float, 1>(tensorInfo4, {-0.05f, 0.72f, 0.25f, 0.08f}); //{numUnits}

    auto outputGateBias =
            MakeTensor<float, 1>(tensorInfo4, {0.05f, -0.01f, 0.2f, 0.1f}); //{numUnits}

    auto recurrentToInputWeights =
            MakeTensor<float, 2>(tensorInfo4x3, {-0.2f, -0.3f,  0.4f,
                                                  0.1f, -0.5f,  0.9f,
                                                 -0.2f, -0.3f, -0.7f,
                                                  0.05f, -0.2f, -0.6f}); //{numUnits, outputSize}

    auto recurrentToCellWeights =
            MakeTensor<float, 2>(tensorInfo4x3, {-0.3f,  0.2f,  0.1f,
                                                 -0.3f,  0.8f, -0.08f,
                                                 -0.2f,  0.3f,  0.8f,
                                                 -0.6f, -0.1f,  0.2f}); //{numUnits, outputSize}

    auto recurrentToForgetWeights =
            MakeTensor<float, 2>(tensorInfo4x3, {-0.5f, -0.3f, -0.5f,
                                                 -0.2f,  0.6f,  0.4f,
                                                  0.9f,  0.3f, -0.1f,
                                                  0.2f,  0.5f,  0.2f}); //{numUnits, outputSize}

    auto recurrentToOutputWeights =
            MakeTensor<float, 2>(tensorInfo4x3, { 0.3f, -0.1f,  0.1f,
                                                 -0.2f, -0.5f, -0.7f,
                                                 -0.2f, -0.6f, -0.1f,
                                                 -0.4f, -0.7f, -0.2f}); //{numUnits, outputSize}

    // Peephole (cell-to-gate) weight vectors.
    auto cellToInputWeights =
            MakeTensor<float, 1>(tensorInfo4, {0.05f, 0.1f, 0.25f, 0.15f}); //{numUnits}

    auto cellToForgetWeights =
            MakeTensor<float, 1>(tensorInfo4, {-0.02f, -0.15f, -0.25f, -0.03f}); //{numUnits}

    auto cellToOutputWeights =
            MakeTensor<float, 1>(tensorInfo4, {0.1f, -0.1f, -0.5f, 0.05f}); //{numUnits}

    // Projection layer: maps numUnits -> outputSize; projection bias is all zeros.
    auto projectionWeights =
            MakeTensor<float, 2>(tensorInfo3x4,
                                 {-0.1f,  0.2f, 0.01f, -0.2f,
                                   0.1f,  0.5f,  0.3f, 0.08f,
                                  0.07f,  0.2f, -0.4f,  0.2f}); //{outputSize, numUnits}

    std::vector<float> projectionBiasVector(outputSize, 0.f);
    auto projectionBias = MakeTensor<float,1>(tensorInfo3, projectionBiasVector); //{outputSize}

    // Per-gate layer-normalization weight vectors.
    auto inputLayerNormWeights =
            MakeTensor<float, 1>(tensorInfo4, {0.1f, 0.2f, 0.3f, 0.5f}); //{numUnits}

    auto forgetLayerNormWeights =
            MakeTensor<float, 1>(tensorInfo4, {0.2f, 0.2f, 0.4f, 0.3f}); //{numUnits}

    auto cellLayerNormWeights =
            MakeTensor<float, 1>(tensorInfo4, {0.7f, 0.2f, 0.3f, 0.8f}); //{numUnits}

    auto outputLayerNormWeights =
            MakeTensor<float, 1>(tensorInfo4, {0.6f, 0.2f, 0.2f, 0.5f}); //{numUnits}


    // Backend-side constant tensor handles; these must outlive the workload execution
    // because the queue descriptor stores raw pointers to them.
    armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(tensorInfo4x5);
    armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(tensorInfo4x5);
    armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(tensorInfo4x5);
    armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(tensorInfo4x5);
    armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(tensorInfo4x3);
    armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(tensorInfo4x3);
    armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(tensorInfo4x3);
    armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(tensorInfo4x3);
    armnn::ScopedCpuTensorHandle cellToInputWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle inputGateBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle forgetGateBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle cellBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle outputGateBiasTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle cellToForgetWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle cellToOutputWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle projectionWeightsTensor(tensorInfo3x4);
    armnn::ScopedCpuTensorHandle projectionBiasTensor(tensorInfo3);

    armnn::ScopedCpuTensorHandle inputLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle forgetLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle cellLayerNormWeightsTensor(tensorInfo4);
    armnn::ScopedCpuTensorHandle outputLayerNormWeightsTensor(tensorInfo4);

    // Copy the constant data into the backend handles.
    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&cellToInputWeightsTensor, &cellToInputWeights[0]);
    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&cellToForgetWeightsTensor, &cellToForgetWeights[0]);
    AllocateAndCopyDataToITensorHandle(&cellToOutputWeightsTensor, &cellToOutputWeights[0]);
    AllocateAndCopyDataToITensorHandle(&projectionWeightsTensor, &projectionWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&projectionBiasTensor, &projectionBias[0]);

    AllocateAndCopyDataToITensorHandle(&inputLayerNormWeightsTensor, &inputLayerNormWeights[0]);
    AllocateAndCopyDataToITensorHandle(&forgetLayerNormWeightsTensor, &forgetLayerNormWeights[0]);
    AllocateAndCopyDataToITensorHandle(&cellLayerNormWeightsTensor, &cellLayerNormWeights[0]);
    AllocateAndCopyDataToITensorHandle(&outputLayerNormWeightsTensor, &outputLayerNormWeights[0]);

    // Populate the queue descriptor with pointers to the constant tensors.
    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;
    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;
    data.m_CellToInputWeights = &cellToInputWeightsTensor;
    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;
    data.m_CellToForgetWeights = &cellToForgetWeightsTensor;
    data.m_CellToOutputWeights = &cellToOutputWeightsTensor;
    data.m_ProjectionWeights = &projectionWeightsTensor;
    data.m_ProjectionBias = &projectionBiasTensor;

    data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
    data.m_ForgetLayerNormWeights = &forgetLayerNormWeightsTensor;
    data.m_CellLayerNormWeights = &cellLayerNormWeightsTensor;
    data.m_OutputLayerNormWeights = &outputLayerNormWeightsTensor;

    // Flags to set test configuration
    data.m_Parameters.m_ActivationFunc = 4; // 4 selects tanh as the cell/output activation
    data.m_Parameters.m_CifgEnabled = false;
    data.m_Parameters.m_PeepholeEnabled = true;
    data.m_Parameters.m_ProjectionEnabled = true;
    data.m_Parameters.m_LayerNormEnabled = true;


    // Create the workload, allocate all handles, upload inputs and run one step.
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateLstm(data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    scratchHandle->Allocate();
    outputStateOutHandle->Allocate();
    cellStateOutHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);

    workload->Execute();

    // Only the final output tensor is read back and compared by the caller.
    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    return ret;
}
1544
// Test implementation for the quantized LSTM workload (QuantizedLstmQueueDescriptor):
// 8-bit asymmetric-quantized input/output/output-state, 16-bit symmetric-quantized
// cell state, 8-bit weights and 32-bit biases — matching the layout of the
// Android NNAPI QUANTIZED_16BIT_LSTM operation. Runs a single time step with
// hard-coded weights/biases and returns actual vs. expected output.
//
// Parameters:
//   workloadFactory - backend factory used to create tensor handles and the workload.
//   memoryManager   - backend memory manager. NOTE(review): not referenced in this
//                     body — presumably kept for signature uniformity; confirm.
//   input           - [numBatches, inputSize] quantized input for one time step.
//   outputExpected  - [numBatches, outputSize] expected quantized output.
LayerTestResult<uint8_t, 2> QuantizedLstmTestImpl(
    armnn::IWorkloadFactory& workloadFactory,
    const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager,
    const boost::multi_array<uint8_t, 2>& input,
    const boost::multi_array<uint8_t, 2>& outputExpected)
{
    // Derive dimensions from the supplied input/expected-output tensors.
    auto numBatches = boost::numeric_cast<unsigned int>(input.shape()[0]);
    auto inputSize = boost::numeric_cast<unsigned int>(input.shape()[1]);
    auto outputSize = boost::numeric_cast<unsigned int>(outputExpected.shape()[1]);

    // Scale/Offset for input/output, cellState In/Out, weights, bias
    float inputOutputScale = 0.0078125f;
    int32_t inputOutputOffset = 128;

    float cellStateScale = 0.00048828125f;
    int32_t cellStateOffset = 0;

    float weightsScale = 0.00408021f;
    int32_t weightsOffset = 100;

    float biasScale = 3.1876640625e-05f;
    int32_t biasOffset = 0;

    // Input/Output tensor info
    armnn::TensorInfo inputInfo({numBatches , inputSize},
                                armnn::DataType::QuantisedAsymm8,
                                inputOutputScale,
                                inputOutputOffset);

    // Cell state is 16-bit symmetric quantized; this info is reused for both the
    // cell-state input and the cell-state output (same shape and quantization).
    armnn::TensorInfo cellStateInfo({numBatches , outputSize},
                                    armnn::DataType::QuantisedSymm16,
                                    cellStateScale,
                                    cellStateOffset);

    armnn::TensorInfo outputStateInfo({numBatches , outputSize},
                                      armnn::DataType::QuantisedAsymm8,
                                      inputOutputScale,
                                      inputOutputOffset);

    LayerTestResult<uint8_t, 2> ret(outputStateInfo);

    // Input0
    std::vector<uint8_t> inputVector;
    inputVector.assign(input.data(), input.data() + (numBatches * inputSize));
    auto inputTensor = MakeTensor<uint8_t, 2>(inputInfo, inputVector);

    // Input1: hard-coded previous cell state (int16 quantized values).
    std::vector<int16_t> cellStateInVector = {876, 1034, 955, -909, 761, 1029, 796, -1036}; // 13
    auto cellStateInTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateInVector);

    // Input2: hard-coded previous output state (uint8 quantized values).
    std::vector<uint8_t> outputStateInVector = {136, 150, 140, 115, 135, 152, 138, 112}; // 14
    auto outputStateInTensor = MakeTensor<uint8_t, 2>(outputStateInfo, outputStateInVector);

    // Output0: expected cell-state output.
    // NOTE(review): this tensor is constructed but never uploaded to a handle or
    // compared against cellStateOutHandle below — only the final output is checked.
    std::vector<int16_t> cellStateOutVector = {1485, 1177, 1373, -1023, 1019, 1355, 1097, -1235}; // 0
    auto cellStateOutTensor = MakeTensor<int16_t, 2>(cellStateInfo, cellStateOutVector);

    // Output1: expected output, supplied by the caller.
    std::vector<uint8_t> outputVector; // 1
    outputVector.assign(outputExpected.data(), outputExpected.data() + (numBatches * outputSize));
    ret.outputExpected = MakeTensor<uint8_t, 2>(outputStateInfo, outputVector);

    // Create tensor handles
    std::unique_ptr<armnn::ITensorHandle> inputHandle = workloadFactory.CreateTensorHandle(inputInfo);
    std::unique_ptr<armnn::ITensorHandle> cellStateInHandle =
            workloadFactory.CreateTensorHandle(cellStateInfo);
    std::unique_ptr<armnn::ITensorHandle> outputStateInHandle =
            workloadFactory.CreateTensorHandle(outputStateInfo);

    std::unique_ptr<armnn::ITensorHandle> cellStateOutHandle =
            workloadFactory.CreateTensorHandle(cellStateInfo);
    std::unique_ptr<armnn::ITensorHandle> outputHandle = workloadFactory.CreateTensorHandle(outputStateInfo);

    armnn::QuantizedLstmQueueDescriptor data;
    armnn::WorkloadInfo info;

    // Add inputs and outputs to workload
    AddInputToWorkload(data, info, inputInfo, inputHandle.get());
    AddInputToWorkload(data, info, cellStateInfo, cellStateInHandle.get());
    AddInputToWorkload(data, info, outputStateInfo, outputStateInHandle.get());

    AddOutputToWorkload(data, info, cellStateInfo, cellStateOutHandle.get());
    AddOutputToWorkload(data, info, outputStateInfo, outputHandle.get());

    // Weights and bias tensor and quantization info
    armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                       armnn::DataType::QuantisedAsymm8,
                                       weightsScale,
                                       weightsOffset);

    armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                           armnn::DataType::QuantisedAsymm8,
                                           weightsScale,
                                           weightsOffset);

    armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset);

    // Weights and bias tensor data
    auto inputToInputWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {146, 250, 235, 171, 10, 218, 171, 108});
    auto inputToForgetWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {24, 50, 132, 179, 158, 110, 3, 169});
    auto inputToCellWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {133, 34, 29, 49, 206, 109, 54, 183});
    auto inputToOutputWeights = MakeTensor<uint8_t, 2>(inputWeightsInfo, {195, 187, 11, 99, 109, 10, 218, 48});

    auto recurrentToInputWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
            {254, 206, 77, 168, 71, 20, 215, 6, 223, 7, 118, 225, 59, 130, 174, 26});
    auto recurrentToForgetWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
            {137, 240, 103, 52, 68, 51, 237, 112, 0, 220, 89, 23, 69, 4, 207, 253});
    auto recurrentToCellWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
            {172, 60, 205, 65, 14, 0, 140, 168, 240, 223, 133, 56, 142, 64, 246, 216});
    auto recurrentToOutputWeights = MakeTensor<uint8_t, 2>(recurrentWeightsInfo,
            {106, 214, 67, 23, 59, 158, 45, 3, 119, 132, 49, 205, 129, 218, 11, 98});

    auto inputGateBias = MakeTensor<int32_t, 1>(biasInfo, {-7876, 13488, -726, 32839});
    auto forgetGateBias = MakeTensor<int32_t, 1>(biasInfo, {9206, -46884, -11693, -38724});
    auto cellBias = MakeTensor<int32_t, 1>(biasInfo, {39481, 48624, 48976, -21419});
    auto outputGateBias = MakeTensor<int32_t, 1>(biasInfo, {-58999, -17050, -41852, -40538});

    // ScopedCpuTensorHandles — must outlive workload execution, since the queue
    // descriptor stores raw pointers to them.
    armnn::ScopedCpuTensorHandle inputToInputWeightsTensor(inputWeightsInfo);
    armnn::ScopedCpuTensorHandle inputToForgetWeightsTensor(inputWeightsInfo);
    armnn::ScopedCpuTensorHandle inputToCellWeightsTensor(inputWeightsInfo);
    armnn::ScopedCpuTensorHandle inputToOutputWeightsTensor(inputWeightsInfo);

    armnn::ScopedCpuTensorHandle recurrentToInputWeightsTensor(recurrentWeightsInfo);
    armnn::ScopedCpuTensorHandle recurrentToForgetWeightsTensor(recurrentWeightsInfo);
    armnn::ScopedCpuTensorHandle recurrentToCellWeightsTensor(recurrentWeightsInfo);
    armnn::ScopedCpuTensorHandle recurrentToOutputWeightsTensor(recurrentWeightsInfo);

    armnn::ScopedCpuTensorHandle inputGateBiasTensor(biasInfo);
    armnn::ScopedCpuTensorHandle forgetGateBiasTensor(biasInfo);
    armnn::ScopedCpuTensorHandle cellBiasTensor(biasInfo);
    armnn::ScopedCpuTensorHandle outputGateBiasTensor(biasInfo);

    // Allocate and copy data
    AllocateAndCopyDataToITensorHandle(&inputToInputWeightsTensor, &inputToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToForgetWeightsTensor, &inputToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToCellWeightsTensor, &inputToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&inputToOutputWeightsTensor, &inputToOutputWeights[0][0]);

    AllocateAndCopyDataToITensorHandle(&recurrentToInputWeightsTensor, &recurrentToInputWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToForgetWeightsTensor, &recurrentToForgetWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToCellWeightsTensor, &recurrentToCellWeights[0][0]);
    AllocateAndCopyDataToITensorHandle(&recurrentToOutputWeightsTensor, &recurrentToOutputWeights[0][0]);

    AllocateAndCopyDataToITensorHandle(&inputGateBiasTensor, &inputGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&forgetGateBiasTensor, &forgetGateBias[0]);
    AllocateAndCopyDataToITensorHandle(&cellBiasTensor, &cellBias[0]);
    AllocateAndCopyDataToITensorHandle(&outputGateBiasTensor, &outputGateBias[0]);

    // Setup queue descriptor
    data.m_InputToInputWeights = &inputToInputWeightsTensor;
    data.m_InputToForgetWeights = &inputToForgetWeightsTensor;
    data.m_InputToCellWeights = &inputToCellWeightsTensor;
    data.m_InputToOutputWeights = &inputToOutputWeightsTensor;

    data.m_RecurrentToInputWeights = &recurrentToInputWeightsTensor;
    data.m_RecurrentToForgetWeights = &recurrentToForgetWeightsTensor;
    data.m_RecurrentToCellWeights = &recurrentToCellWeightsTensor;
    data.m_RecurrentToOutputWeights = &recurrentToOutputWeightsTensor;

    data.m_InputGateBias = &inputGateBiasTensor;
    data.m_ForgetGateBias = &forgetGateBiasTensor;
    data.m_CellBias = &cellBiasTensor;
    data.m_OutputGateBias = &outputGateBiasTensor;

    // Create workload and allocate tensor handles
    std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateQuantizedLstm(data, info);
    inputHandle->Allocate();
    outputStateInHandle->Allocate();
    cellStateInHandle->Allocate();

    cellStateOutHandle->Allocate();
    outputHandle->Allocate();

    CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);
    CopyDataToITensorHandle(outputStateInHandle.get(), &outputStateInTensor[0][0]);
    CopyDataToITensorHandle(cellStateInHandle.get(), &cellStateInTensor[0][0]);

    workload->Execute();

    // Only the quantized output state is read back; the caller compares it to
    // ret.outputExpected.
    CopyDataFromITensorHandle(&ret.output[0][0], outputHandle.get());

    return ret;
}
1730
1731} // anonymous namespace
1732
1733#if defined(ARMNNREF_ENABLED)
1734
1735// The LSTM test units are run only for the reference backend at the moment
1736
1737void LstmUtilsZeroVectorTest()
1738{
1739 armnn::TensorInfo inputDesc({4}, armnn::DataType::Float32);
1740 boost::multi_array<float, 1> input = MakeTensor<float, 1>(inputDesc, std::vector<float>(
1741 {2., 3., 3., 4.}));
1742
1743 boost::multi_array<float, 1> expectedOutput = MakeTensor<float, 1>(inputDesc, std::vector<float>(
1744 {0., 0., 0., 0.}));
1745
1746 return LstmUtilsZeroVectorTestImpl<armnn::DataType::Float32>(input, 4, expectedOutput);
1747}
1748
1749void LstmUtilsMeanStddevNormalizationNoneZeroInputTest()
1750{
1751 uint32_t batchSize = 2;
1752 uint32_t vecSize = 4;
1753 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1754 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1755 { 0.1f, 0.2f, 0.3f, 0.4f, //batch 0
1756 0.9f, 1.0f, 1.1f, 1.2f })); //batch 1
1757
1758 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1759 { -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f, //batch 0
1760 -1.34163153f, -0.447210163f, 0.447211236f, 1.3416326f })); //batch 1
1761
1762 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1763 vecSize, batchSize, expectedOutput);
1764}
1765
1766void LstmUtilsMeanStddevNormalizationAllZeroInputTest()
1767{
1768 uint32_t batchSize = 2;
1769 uint32_t vecSize = 4;
1770 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1771 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1772 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1773 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
1774
1775 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1776 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1777 0.0f, 0.0f, 0.0f, 0.0f })); //batch 1
1778
1779 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1780 vecSize, batchSize, expectedOutput);
1781}
1782
1783void LstmUtilsMeanStddevNormalizationMixedZeroInputTest()
1784{
1785 uint32_t batchSize = 2;
1786 uint32_t vecSize = 4;
1787 armnn::TensorInfo inputDesc({batchSize, vecSize}, armnn::DataType::Float32);
1788 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1789 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1790 0.1f, 0.2f, 0.3f, 0.4f })); //batch 1
1791
1792 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1793 { 0.0f, 0.0f, 0.0f, 0.0f, //batch 0
1794 -1.34164071f, -0.447213531f, 0.44721365f, 1.34164071f })); //batch 1
1795
1796 return LstmUtilsMeanStddevNormalizationTestImpl<armnn::DataType::Float32>(input,
1797 vecSize, batchSize, expectedOutput);
1798}
1799
// Checks LstmUtils VectorBatchVectorCwiseProduct: a single 29-element vector is
// multiplied element-wise into each of the 4 batch rows. The fixture covers
// positive, negative, mixed-sign and zero entries.
void LstmUtilsVectorBatchVectorCwiseProductTest()
{
    uint32_t batchSize = 4;
    uint32_t vecSize = 29;
    armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
        { 1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
        11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
        21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f}));

    armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
    boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
        { /* batch 0 */
        1.1f, 2.2f, 3.3f, 4.4f, 5.5f, 6.6f, 7.7f, 8.8f, 9.9f, 10.1f,
        11.11f, 12.12f, 13.13f, 14.14f, 15.15f, 16.16f, 17.17f, 18.18f, 19.19f, 20.2f,
        21.21f, 22.22f, 23.23f, 24.24f, 25.25f, 26.26f, 27.27f, 28.28f, 0.0f,
        /* batch 1 */
        -1.1f, -2.2f, -3.3f, -4.4f, -5.5f, -6.6f, -7.7f, -8.8f, -9.9f, -10.1f,
        -11.11f, -12.12f, -13.13f, -14.14f, -15.15f, -16.16f, -17.17f, -18.18f, -19.19f, -20.2f,
        -21.21f, -22.22f, -23.23f, -24.24f, -25.25f, -26.26f, -27.27f, -28.28f, 0.0f,
        /* batch 2 */
        1.1f, -2.2f, 3.3f, -4.4f, 5.5f, -6.6f, 7.7f, -8.8f, 9.9f, -10.1f,
        11.11f, -12.12f, 13.13f, -14.14f, 15.15f, -16.16f, 17.17f, -18.18f, 19.19f, -20.2f,
        21.21f, -22.22f, 23.23f, -24.24f, 25.25f, -26.26f, 27.27f, -28.28f, 0.0f,
        /* batch 3 */
        -1.1f, 2.2f, -3.3f, 4.4f, -5.5f, 6.6f, -7.7f, 8.8f, -9.9f, 10.1f,
        -11.11f, 12.12f, -13.13f, 14.14f, -15.15f, 16.16f, -17.17f, 18.18f, -19.19f, 20.2f,
        -21.21f, 22.22f, -23.23f, 24.24f, -25.25f, 26.26f, -27.27f, 28.28f, 0.0f}));

    // Expected output = vector * batchVector, element-wise within each batch row
    // (e.g. 1.1f * 1.1f = 1.21f). NOTE(review): the previous comment said
    // "input * output + output", which describes the *accumulate* variant and
    // does not match the expected values below.
    boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
        { /* batch 0 */
        1.210000f, 4.840000f, 10.889999f, 19.360001f, 30.250000f, 43.559998f,
        59.289997f, 77.440002f, 98.009995f, 102.010010f, 123.432091f, 146.894394f,
        172.396896f, 199.939606f, 229.522491f, 261.145599f, 294.808899f, 330.512421f,
        368.256134f, 408.040039f, 449.864075f, 493.728363f, 539.632874f, 587.577576f,
        637.562500f, 689.587585f, 743.652954f, 799.758423f, 0.000000f,
        /* batch 1 */
        -1.210000f, -4.840000f, -10.889999f, -19.360001f, -30.250000f, -43.559998f,
        -59.289997f, -77.440002f, -98.009995f, -102.010010f, -123.432091f, -146.894394f,
        -172.396896f, -199.939606f, -229.522491f, -261.145599f, -294.808899f, -330.512421f,
        -368.256134f, -408.040039f, -449.864075f, -493.728363f, -539.632874f, -587.577576f,
        -637.562500f, -689.587585f, -743.652954f, -799.758423f, 0.000000f,
        /* batch 2 */
        1.210000f, -4.840000f, 10.889999f, -19.360001f, 30.250000f, -43.559998f,
        59.289997f, -77.440002f, 98.009995f, -102.010010f, 123.432091f, -146.894394f,
        172.396896f, -199.939606f, 229.522491f, -261.145599f, 294.808899f, -330.512421f,
        368.256134f, -408.040039f, 449.864075f, -493.728363f, 539.632874f, -587.577576f,
        637.562500f, -689.587585f, 743.652954f, -799.758423f, 0.000000f,
        /* batch 3 */
        -1.210000f, 4.840000f, -10.889999f, 19.360001f, -30.250000f, 43.559998f,
        -59.289997f, 77.440002f, -98.009995f, 102.010010f, -123.432091f, 146.894394f,
        -172.396896f, 199.939606f, -229.522491f, 261.145599f, -294.808899f, 330.512421f,
        -368.256134f, 408.040039f, -449.864075f, 493.728363f, -539.632874f, 587.577576f,
        -637.562500f, 689.587585f, -743.652954f, 799.758423f, 0.000000f}));

    return LstmUtilsVectorBatchVectorCwiseProductTestImpl<armnn::DataType::Float32>(vector, batchVector,
            vecSize, batchSize, expectedOutput);
}
1859
1860void LstmUtilsVectorBatchVectorAddTest()
1861{
1862 uint32_t batchSize = 2;
1863 uint32_t vecSize = 3;
1864 armnn::TensorInfo vecDesc({vecSize}, armnn::DataType::Float32);
1865 boost::multi_array<float, 1> vector = MakeTensor<float, 1>(vecDesc, std::vector<float>(
1866 { 0.0f, -0.5f, 1.0f}));
1867
1868 armnn::TensorInfo batchVecDesc({batchSize, vecSize}, armnn::DataType::Float32);
1869 boost::multi_array<float, 2> batchVector = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1870 { 1.0f, 2.0f, 3.0f, //batch 0
1871 4.0f, 5.0f, 6.0f})); //batch 1
1872
1873 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(batchVecDesc, std::vector<float>(
1874 { 1.0f, 1.5f, 4.0f,
1875 4.0f, 4.5f, 7.0f}));
1876
1877 return LstmUtilsVectorBatchVectorAddTestImpl<armnn::DataType::Float32>(vector, batchVector,
1878 vecSize, batchSize, expectedOutput);
1879}
1880
1881#endif
1882
1883LayerTestResult<float, 2> LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest(
1884 armnn::IWorkloadFactory& workloadFactory,
1885 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1886{
1887 armnn::TensorInfo inputDesc({ 2, 2 }, armnn::DataType::Float32);
1888 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1889 { 2., 3., 3., 4. }));
1890
1891 armnn::TensorInfo outputDesc({ 2, 4 }, armnn::DataType::Float32);
1892 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1893 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1894 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f}));
1895 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
1896 workloadFactory, memoryManager, input, expectedOutput);
1897}
1898
1899LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest(
1900 armnn::IWorkloadFactory& workloadFactory,
1901 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1902{
1903 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
1904 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1905 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
1906 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f}));
1907
1908 armnn::TensorInfo outputDesc({ 2, 16 }, armnn::DataType::Float32);
1909 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1910 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
1911 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
1912 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
1913 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
1914 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
1915 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f,
1916 0.02168f}));
1917 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<armnn::DataType::Float32>(
1918 workloadFactory, memoryManager, input, expectedOutput);
1919}
1920
1921LayerTestResult<float, 2> LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest(
1922 armnn::IWorkloadFactory& workloadFactory,
1923 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1924{
1925 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::Float32);
1926 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1927 {2., 3., 3., 4.}));
1928
1929 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::Float32);
1930 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1931 {{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1932 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}}));
1933
1934 return LstmNoCifgNoPeepholeNoProjectionTestImpl<armnn::DataType::Float32>(
1935 workloadFactory, memoryManager, input, expectedOutput);
1936}
1937
1938LayerTestResult<float, 2> LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest(
1939 armnn::IWorkloadFactory& workloadFactory,
1940 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1941{
1942 armnn::TensorInfo inputDesc({ 2, 5 }, armnn::DataType::Float32);
1943 boost::multi_array<float, 2> input = MakeTensor<float, 2>(inputDesc, std::vector<float>(
1944 {0.7f, 0.8f, 0.1f, 0.2f, 0.3f, //batch 0
1945 0.3f, 0.2f, 0.9f, 0.8f, 0.1f})); //batch 1
1946
1947 armnn::TensorInfo outputDesc({ 2, 3 }, armnn::DataType::Float32);
1948 boost::multi_array<float, 2> expectedOutput = MakeTensor<float, 2>(outputDesc, std::vector<float>(
1949 { 0.0244077f, 0.128027f, -0.00170918f, //batch 0
1950 -0.00692428f, 0.0848741f, 0.063445f})); //batch 1
1951 return LstmLayerNoCifgWithPeepholeWithProjectionWithLayerNormTestImpl<armnn::DataType::Float32>(
1952 workloadFactory, memoryManager, input, expectedOutput);
1953}
1954
1955LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionTest(
1956 armnn::IWorkloadFactory& workloadFactory,
1957 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1958{
1959 const float qScale = 1.0f;
1960 const int32_t qOffset = 0;
1961
1962 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1963 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1964
1965 armnn::TensorInfo inputDesc({2, 2}, datatype);
1966 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
1967 std::vector<float>{2., 3., 3., 4.}));
1968
1969 armnn::TensorInfo outputDesc({2, 4}, datatype);
1970 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1971 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
1972 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
1973
1974 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
1975 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
1976
1977}
1978
1979LayerTestResult<int16_t, 2> LstmLayerInt16WithCifgWithPeepholeNoProjectionTest(
1980 armnn::IWorkloadFactory& workloadFactory,
1981 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
1982{
1983 const float qScale = 1.0f;
1984 const int32_t qOffset = 0;
1985
1986 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
1987 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
1988
1989 armnn::TensorInfo inputDesc({ 2, 2 }, datatype);
1990 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale, qOffset,
1991 std::vector<float>({ 2., 3., 3., 4. })));
1992
1993 armnn::TensorInfo outputDesc({ 2, 4 }, datatype);
1994 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
1995 qOffset, std::vector<float>(
1996 {-0.36444446f, -0.00352185f, 0.12886585f, -0.05163646f,
1997 -0.42734814f, -0.00478661f, 0.13455015f, -0.03560682f})));
1998
1999 return LstmLayerWithCifgWithPeepholeNoProjectionTestImpl<datatype>(
2000 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2001}
2002
2003LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgWithPeepholeWithProjectionTest(
2004 armnn::IWorkloadFactory& workloadFactory,
2005 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2006{
2007 const float qScale = 2.0f;
2008 const int32_t qOffset = 0;
2009
2010 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16;
2011 const armnn::DataType constantDatatype = armnn::DataType::QuantisedAsymm8;
2012
2013 armnn::TensorInfo inputDesc({ 2, 5 }, datatype);
2014 boost::multi_array<int16_t, 2> input = MakeTensor<int16_t, 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2015 qOffset, std::vector<float>(
2016 {0.787926f, 0.151646f, 0.071352f, 0.118426f, 0.458058f,
2017 0.295743f, 0.544053f, 0.690064f, 0.858138f, 0.497181f})));
2018
2019 armnn::TensorInfo outputDesc({ 2, 16 }, datatype);
2020 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2021 qOffset, std::vector<float>(
2022 {-0.00396806f, 0.029352f, -0.00279226f, 0.0159977f, -0.00835576f,
2023 -0.0211779f, 0.0283512f, -0.0114597f, 0.00907307f, -0.0244004f,
2024 -0.0152191f, -0.0259063f, 0.00914318f, 0.00415118f, 0.017147f,
2025 0.0134203f, -0.013869f, 0.0287268f, -0.00334693f, 0.00733398f, -0.0287926f,
2026 -0.0186926f, 0.0193662f, -0.0115437f, 0.00422612f, -0.0345232f,
2027 0.00223253f, -0.00957321f, 0.0210624f, 0.013331f, 0.0150954f, 0.02168f})));
2028
2029 return LstmLayerNoCifgWithPeepholeWithProjectionTestImpl<datatype>(
2030 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, constantDatatype);
2031}
2032
2033LayerTestResult<int16_t, 2> LstmLayerInt16NoCifgNoPeepholeNoProjectionInt16ConstantTest(
2034 armnn::IWorkloadFactory& workloadFactory,
2035 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2036{
2037 const float qScale = 1.0f;
2038 const int32_t qOffset = 0;
2039
2040 const armnn::DataType datatype = armnn::DataType::QuantisedSymm16; // datatype & constants set to QSymm16
2041
2042 armnn::TensorInfo inputDesc({2, 2}, datatype);
2043 boost::multi_array<int16_t , 2> input = MakeTensor<int16_t , 2>(inputDesc, QuantizedVector<int16_t>(qScale,
2044 qOffset, std::vector<float>{2., 3., 3., 4.}));
2045
2046 armnn::TensorInfo outputDesc({2, 4}, datatype);
2047 boost::multi_array<int16_t, 2> expectedOutput = MakeTensor<int16_t, 2>(outputDesc, QuantizedVector<int16_t>(qScale,
2048 qOffset, std::vector<float>({{-0.02973187f, 0.1229473f, 0.20885126f, -0.15358765f,
2049 -0.0185422f, 0.11281417f, 0.24466537f, -0.1826292f}})));
2050
2051 return LstmNoCifgNoPeepholeNoProjectionTestImpl<datatype>(
2052 workloadFactory, memoryManager, input, expectedOutput, qScale, qOffset, datatype);
2053}
2054
2055//
2056// QuantizedLstm
2057//
2058
2059LayerTestResult<uint8_t, 2> QuantizedLstmTest(
2060 armnn::IWorkloadFactory& workloadFactory,
2061 const armnn::IBackendInternal::IMemoryManagerSharedPtr& memoryManager)
2062{
2063 armnn::TensorInfo inputDesc({2, 2}, armnn::DataType::QuantisedAsymm8);
2064 boost::multi_array<uint8_t, 2> input = MakeTensor<uint8_t, 2>(inputDesc, std::vector<uint8_t>(
2065 {166, 179, 50, 150}));
2066
2067 armnn::TensorInfo outputDesc({2, 4}, armnn::DataType::QuantisedAsymm8);
2068 boost::multi_array<uint8_t, 2> expectedOutput = MakeTensor<uint8_t, 2>(outputDesc, std::vector<uint8_t>(
2069 {140, 151, 146, 112, 136, 156, 142, 112 }));
2070
2071 return QuantizedLstmTestImpl(workloadFactory, memoryManager, input, expectedOutput);
2072}